spatstat.explore/
spatstat.explore/tests/
spatstat.explore/tests/testsR1.R
#'
#' Header for all (concatenated) test files
#'
#' Require spatstat.explore
#' Obtain environment variable controlling tests.
#'
#' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $

require(spatstat.explore)
FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0)
ALWAYS <- TRUE
cat(paste("--------- Executing",
          if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of",
          "test code -----------\n"))
##
## tests/rhohat.R
##
## Test all combinations of options for rhohatCalc
##
## $Revision: 1.6 $ $Date: 2022/05/22 08:03:48 $

local({
  if(FULLTEST) {
    X <- rpoispp(function(x,y){exp(3+3*x)})
    Z <- as.im(function(x,y) { x }, Window(X))
    f <- funxy(function(x,y) { y + 1 }, Window(X))
    ## rhohat.ppp
    ## done in example(rhohat):
    ## rhoA <- rhohat(X, "x")
    ## rhoB <- rhohat(X, "x", method="reweight")
    ## rhoC <- rhohat(X, "x", method="transform")
    ## alternative smoother (if package locfit available)
    rhoA <- rhohat(X, "x", smoother="local")
    rhoB <- rhohat(X, "x", smoother="local", method="reweight")
    rhoC <- rhohat(X, "x", smoother="local", method="transform")
    #' code blocks
    rhoD <- rhohat(X, "y", positiveCI=TRUE)
    rhoE <- rhohat(X, Z, positiveCI=TRUE)
    #' weights
    rhoF <- rhohat(X, Z, weights=f(X))
    rhoG <- rhohat(X, Z, weights=f)
    rhoH <- rhohat(X, Z, weights=as.im(f))
    lam <- as.im(function(x,y) {exp(3+2*x)}, W=Window(Z))
    ## Baseline
    rhoAb <- rhohat(X, "x", baseline=lam)
    rhoBb <- rhohat(X, "x", method="reweight", baseline=lam)
    rhoCb <- rhohat(X, "x", method="transform", baseline=lam)
    ## Horvitz-Thompson
    rhoAH <- rhohat(X, "x", horvitz=TRUE)
    rhoBH <- rhohat(X, "x", method="reweight", horvitz=TRUE)
    rhoCH <- rhohat(X, "x", method="transform", horvitz=TRUE)
    ## class support
    plot(rhoA)
    plot(rhoA, rho ~ x, shade=NULL)
    plot(rhoA, log(rho) ~ x, shade=NULL)
    plot(rhoA, log(.) ~ x)
    ## rho2hat
    r2xy <- rho2hat(X, "x", "y")
    r2xyw <- rho2hat(X, "x", "y", method="reweight")
    print(r2xyw)
    plot(r2xy, do.points=TRUE)
    xcoord <- function(x,y) x
    ycoord <- function(x,y) y
    xim <- as.im(xcoord, W=Window(X))
    r2fi <- rho2hat(X, ycoord, xim)
    r2if <- rho2hat(X, xim, ycoord)
  }
})
spatstat.explore/tests/testsP2.R
#'
#' Header for all (concatenated) test files
#'
#' Require spatstat.explore
#' Obtain environment variable controlling tests.
#'
#' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $

require(spatstat.explore)
FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0)
ALWAYS <- TRUE
cat(paste("--------- Executing",
          if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of",
          "test code -----------\n"))
spatstat.explore/tests/testsR2.R
#'
#' Header for all (concatenated) test files
#'
#' Require spatstat.explore
#' Obtain environment variable controlling tests.
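#'
#' Note on the header above, which is repeated at the top of every
#' concatenated test file: any non-empty value of the environment variable
#' SPATSTAT_TEST switches FULLTEST on, so the if(FULLTEST) blocks run in
#' addition to the if(ALWAYS) blocks. A minimal sketch of driving a full
#' run interactively (hypothetical session, not part of the package code):
#'
#'   Sys.setenv(SPATSTAT_TEST = "yes")  # any non-empty string enables FULLTEST
#'   require(spatstat.explore)
#'   source("testsR1.R")                # run one of the concatenated test files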
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.explore) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) spatstat.explore/tests/testsK.R0000644000176200001440000002645514611073330016346 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.explore #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.explore) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) #' #' tests/kernelstuff.R #' #' $Revision: 1.2 $ $Date: 2023/11/05 01:49:45 $ local({ if(FULLTEST) { #' test all cases in kernels.R kernames <- c("gaussian", "rectangular", "triangular", "epanechnikov", "biweight", "cosine", "optcosine") X <- rnorm(20) U <- runif(20) for(ker in kernames) { dX <- dkernel(X, ker) fX <- pkernel(X, ker) qU <- qkernel(U, ker) m0 <- kernel.moment(0, 0, ker) m1 <- kernel.moment(1, 0, ker) m2 <- kernel.moment(2, 0, ker) m3 <- kernel.moment(3, 0, ker) } } }) #' #' tests/Kfuns.R #' #' Various K and L functions and pcf #' #' $Revision: 1.43 $ $Date: 2022/06/17 01:47:08 $ #' #' Assumes 'EveryStart.R' was run if(FULLTEST) { Cells <- cells Amacrine <- amacrine Redwood <- redwood } else { ## reduce numbers of data + dummy points spatstat.options(npixel=32, ndummy.min=16) Cells <- cells[c(FALSE, TRUE)] Amacrine <- amacrine[c(FALSE, TRUE)] Redwood <- redwood[c(FALSE, TRUE)] } myfun <- function(x,y){(x+1) * y } # must be outside local({ if(FULLTEST) { #' supporting code rmax.rule("Kscaled", owin(), 42) implemented.for.K(c("border", "bord.modif", "translate", "good", "best"), "polygonal", TRUE) implemented.for.K(c("border", "bord.modif", "translate", "good", "best"), "mask", TRUE) implemented.for.K(c("border", "isotropic"), "mask", TRUE) implemented.for.K(c("border", "isotropic"), "mask", FALSE) #' shortcuts D <- density(Cells) K <- Kborder.engine(Cells, rmax=0.4, weights=D, ratio=TRUE) K <- Knone.engine(Cells, rmax=0.4, weights=D, ratio=TRUE) allcor <- c("none", "border", "bord.modif","isotropic", "translate") K <- Krect.engine(Cells, rmax=0.4, ratio=TRUE, correction=allcor) K <- Krect.engine(Cells, rmax=0.4, ratio=TRUE, correction=allcor, weights=D) K <- Krect.engine(Cells, rmax=0.4, ratio=TRUE, correction=allcor, use.integers=FALSE) #' Kest special code blocks K <- Kest(Cells, var.approx=TRUE, ratio=FALSE) Z <- distmap(Cells) + 1 Kb <- Kest(Cells, correction=c("border","bord.modif"), weights=Z, ratio=TRUE) Kn <- Kest(Cells, correction="none", weights=Z, ratio=TRUE) Knb <- Kest(Cells, correction=c("border","bord.modif","none"), weights=Z, ratio=TRUE) } if(ALWAYS) { bigint <- 50000 # This is only "big" on a 32-bit system where # sqrt(.Machine$integer.max) = 46340.9 X <- runifpoint(bigint) Z <- as.im(1/bigint, owin()) Kb <- Kest(X, correction=c("border","bord.modif"), rmax=0.02, weights=Z, ratio=TRUE) } if(FULLTEST) { Kn <- Kest(X, correction="none", rmax=0.02, weights=Z, ratio=TRUE) Knb <- Kest(X, correction=c("border","bord.modif","none"), rmax=0.02, weights=Z, ratio=TRUE) #' pcf.ppp special code blocks pr <- pcf(Cells, ratio=TRUE, var.approx=TRUE) pc <- pcf(Cells, domain=square(0.5)) pcr <- pcf(Cells, domain=square(0.5), ratio=TRUE) pw <- pcf(Redwood, correction="none") pwr <- pcf(Redwood, correction="none", 
ratio=TRUE) pv <- pcf(Redwood, kernel="rectangular") p1 <- pcf(Redwood[1]) #' pcf.fv K <- Kest(Redwood) g <- pcf(K, method="a") g <- pcf(K, method="c") g <- pcf(K, method="d") #' Kinhom code blocks X <- rpoispp(function(x,y) { 100 * x }, 100, square(1)) lambda <- 100 * X$x Kin <- Kinhom(X, lambda, correction=c("none", "border")) lambda2 <- outer(lambda, lambda, "*") Ki2 <- Kinhom(X, lambda2=lambda2, diagonal=FALSE, correction=c("translate", "isotropic")) } if(ALWAYS) { #' edge corrections rr <- rep(0.1, npoints(Cells)) eC <- edge.Ripley(Cells, rr) eI <- edge.Ripley(Cells, rr, method="interpreted") if(max(abs(eC-eI)) > 0.1) stop("Ripley edge correction results do not match") } if(FULLTEST) { a <- rmax.Ripley(square(1)) a <- rmax.Rigid(square(1)) a <- rmax.Ripley(as.polygonal(square(1))) a <- rmax.Rigid(as.polygonal(square(1))) a <- rmax.Ripley(letterR) a <- rmax.Rigid(letterR) } if(ALWAYS) { #' run slow code for edge correction and compare results op <- spatstat.options(npixel=128) X <- Redwood[c(TRUE, FALSE, FALSE, FALSE)] Window(X) <- as.polygonal(Window(X)) Eapprox <- edge.Trans(X) Eexact <- edge.Trans(X, exact=TRUE) maxrelerr <- max(abs(1 - range(Eapprox/Eexact))) if(maxrelerr > 0.1) stop(paste("Exact and approximate algorithms for edge.Trans disagree by", paste0(round(100*maxrelerr), "%")), call.=FALSE) spatstat.options(op) } }) local({ if(FULLTEST) { #' ---- multitype ------ K <- Kcross(Amacrine, correction=c("none", "bord.modif")) K <- Kcross(Amacrine, correction=c("none", "bord", "bord.modif"), ratio=TRUE) #' inhomogeneous multitype K2 <- Kcross.inhom(Amacrine, lambdaX=densityfun(Amacrine)) K3 <- Kcross.inhom(Amacrine, lambdaX=density(Amacrine, at="points")) K5 <- Kcross.inhom(Amacrine, correction="bord.modif") #' markconnect, markcorr M <- markconnect(Amacrine, "on", "off", normalise=TRUE) M <- markcorr(longleaf, normalise=TRUE, correction=c("isotropic", "translate", "border", "none")) M <- markcorr(longleaf, normalise=TRUE, fargs=list()) #' Kmark (=markcorrint) X <- runifpoint(100) %mark% runif(100) km <- Kmark(X, f=atan2) km <- Kmark(X, f1=sin) km <- Kmark(X, f="myfun") aa <- Kmark(X, normalise=FALSE, returnL=FALSE) aa <- Kmark(X, normalise=FALSE, returnL=TRUE) aa <- Kmark(X, normalise=TRUE, returnL=FALSE) aa <- Kmark(X, normalise=TRUE, returnL=TRUE) } }) local({ if(FULLTEST) { #' various modified K functions #' #' directional K functions #' a <- Ksector(swedishpines, -pi/2, pi/2, units="radians", correction=c("none", "border", "bord.modif", "Ripley", "translate"), ratio=TRUE) plot(a) #' #' local K functions #' Z <- as.im(intensity(swedishpines), W=Window(swedishpines)) ZX <- Z[swedishpines] a <- localLinhom(swedishpines, lambda=Z) a <- localLinhom(swedishpines, lambda=ZX) a <- localLinhom(swedishpines, lambda=Z, correction="none") a <- localLinhom(swedishpines, lambda=Z, correction="translate") a <- localLcross(Amacrine) a <- localLcross(Amacrine, from="off", to="off") a <- localKdot(Amacrine) a <- localLdot(Amacrine) a <- localKcross.inhom(Amacrine) a <- localLcross.inhom(Amacrine) Zed <- solapply(intensity(amacrine), as.im, W=Window(amacrine)) Lum <- evaluateCovariateAtPoints(Zed, Amacrine) moff <- (marks(Amacrine) == "off") a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaX=Zed) a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaX=Lum) a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaFrom=Lum[moff], lambdaTo=Lum[!moff]) a <- localLcross.inhom(Amacrine, from="off", to="on", lambdaX=Zed, correction="none") a <- localLcross.inhom(Amacrine, 
from="off", to="on", lambdaX=Zed, correction="translate") #' #' cases of resolve.lambdacross #' h <- resolve.lambdacross(Amacrine, moff, !moff) h <- resolve.lambdacross(Amacrine, moff, !moff, lambdaX=Zed) h <- resolve.lambdacross(Amacrine, moff, !moff, lambdaX=Lum) h <- resolve.lambdacross(Amacrine, moff, !moff, lambdaI=Zed[["off"]], lambdaJ=Zed[["on"]]) h <- resolve.lambdacross(Amacrine, moff, !moff, lambdaI=Lum[moff], lambdaJ=Lum[!moff]) d <- densityfun(unmark(Amacrine), sigma=0.1) dm <- lapply(split(Amacrine), densityfun, sigma=0.1) h <- resolve.lambdacross(Amacrine, moff, !moff, lambdaX=d) h <- resolve.lambdacross(Amacrine, moff, !moff, lambdaI=dm[["off"]], lambdaJ=dm[["on"]]) h <- resolve.lambdacross(Amacrine, moff, !moff, lambdaX=function(x,y,m){ d(x,y) }) #' #' multitype inhomogeneous pcf #' g <- pcfcross.inhom(Amacrine, lambdaI=dm[["off"]], lambdaJ=dm[["on"]]) #' #' lohboot code blocks #' Ared <- lohboot(Redwood, fun="Kest", block=TRUE, Vcorrection=TRUE, global=FALSE, correction="none") Bred <- lohboot(Redwood, block=TRUE, basicboot=TRUE, global=FALSE) Cred <- lohboot(Redwood, fun=Kest, block=TRUE, global=TRUE, correction="translate") Dred <- lohboot(Redwood, Lest) Kred <- lohboot(Redwood, Kinhom) Lred <- lohboot(Redwood, Linhom) gred <- lohboot(Redwood, pcfinhom, sigma=0.1) #' X <- runifpoint(100, letterR) AX <- lohboot(X, block=TRUE, nx=7, ny=10) #' multitype b <- lohboot(Amacrine, Kcross) b <- lohboot(Amacrine, Lcross) b <- lohboot(Amacrine, Kdot) b <- lohboot(Amacrine, Ldot) b <- lohboot(Amacrine, Kcross.inhom) b <- lohboot(Amacrine, Lcross.inhom) ## Kscaled A <- Lscaled(japanesepines, renormalise=TRUE, correction="all") } }) local({ if(ALWAYS) { #' From Ege, in response to a stackoverflow question. #' The following example has two points separated by r = 1 with 1/4 of the #' circumference outside the 10x10 window (i.e. area 100). #' Thus the value of K^(r) should jump from 0 to #' 100/(2\cdot 1)\cdot ((3/4)^{-1} + (3/4)^{-1}) = 100 \cdot 4/3 = 133.333. x <- c(4.5,5.5) y <- c(10,10)-sqrt(2)/2 W <- square(10) X <- ppp(x, y, W) compere <- function(a, b, where, tol=1e-6) { descrip <- paste("discrepancy in isotropic edge correction", where) err <- as.numeric(a) - as.numeric(b) maxerr <- max(abs(err)) blurb <- paste(descrip, "is", paste0(signif(maxerr, 4), ","), if(maxerr > tol) "exceeding" else "within", "tolerance of", tol) message(blurb) if(maxerr > tol) { message(paste("Discrepancies:", paste(err, collapse=", "))) stop(paste("excessive", descrip), call.=FALSE) } invisible(TRUE) } ## Testing: eX <- edge.Ripley(X, c(1,1)) compere(eX, c(4/3,4/3), "at interior point of rectangle") ## Corner case: Y <- X Y$x <- X$x-4.5+sqrt(2)/2 eY <- edge.Ripley(Y, c(1,1)) compere(eY, c(2,4/3), "near corner of rectangle") ## Invoke polygonal code Z <- rotate(Y, pi/4) eZdebug <- edge.Ripley(Z, c(1,1), internal=list(debug=TRUE)) compere(eZdebug, c(2,4/3), "at interior point of polygon (debug on)") ## test validity without debugger,in case of quirks of compiler optimisation eZ <- edge.Ripley(Z, c(1,1)) compere(eZ, c(2,4/3), "at interior point of polygon (debug off)") } }) reset.spatstat.options() spatstat.explore/tests/testsM.R0000644000176200001440000000545314611073330016343 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.explore #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.explore) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) ## ## tests/markcor.R ## ## Tests of mark correlation code (etc) ## ## $Revision: 1.7 $ $Date: 2020/11/25 01:23:32 $ local({ if(ALWAYS) { ## check.testfun checks equality of functions ## and is liable to break if the behaviour of all.equal is changed fe <- function(m1, m2) {m1 == m2} fm <- function(m1, m2) {m1 * m2} fs <- function(m1, m2) {sqrt(m1)} if(check.testfun(fe, X=amacrine)$ftype != "equ") warning("check.testfun fails to recognise mark equality function") if(check.testfun(fm, X=longleaf)$ftype != "mul") warning("check.testfun fails to recognise mark product function") check.testfun(fs, X=longleaf) check.testfun("mul") check.testfun("equ") } if(FULLTEST) { ## test all is well in Kmark -> Kinhom MA <- Kmark(amacrine,function(m1,m2){m1==m2}) set.seed(42) AR <- rlabel(amacrine) MR <- Kmark(AR,function(m1,m2){m1==m2}) if(isTRUE(all.equal(MA,MR))) stop("Kmark unexpectedly ignores marks") ## cover code blocks in markcorr() X <- runifpoint(100) %mark% runif(100) Y <- X %mark% data.frame(u=runif(100), v=runif(100)) ww <- runif(100) fone <- function(x) { x/2 } ffff <- function(x,y) { fone(x) * fone(y) } aa <- markcorr(Y) bb <- markcorr(Y, ffff, weights=ww, normalise=TRUE) bb <- markcorr(Y, ffff, weights=ww, normalise=FALSE) bb <- markcorr(Y, f1=fone, weights=ww, normalise=TRUE) bb <- markcorr(Y, f1=fone, weights=ww, normalise=FALSE) ## markcrosscorr a <- markcrosscorr(betacells, normalise=FALSE) if(require(sm)) { b <- markcrosscorr(betacells, method="sm") } ## Vmark with normalisation v <- Vmark(spruces, normalise=TRUE) v <- Vmark(finpines, normalise=TRUE) } }) #' tests/mctests.R #' Monte Carlo tests #' (mad.test, dclf.test, envelopeTest, hasenvelope) #' $Revision: 1.5 $ $Date: 2022/05/23 04:09:49 $ local({ if(FULLTEST) { envelopeTest(cells, Lest, exponent=1, nsim=9, savepatterns=TRUE) (a3 <- envelopeTest(cells, Lest, exponent=3, nsim=9, savepatterns=TRUE)) envelopeTest(a3, Lest, exponent=3, nsim=9, alternative="less") envelopeTest(redwood, Lest, exponent=1, nsim=19, rinterval=c(0, 0.1), alternative="greater", clamp=TRUE) envelopeTest(redwood, pcf, exponent=Inf, nsim=19, rinterval=c(0, 0.1), alternative="greater", clamp=TRUE) } }) spatstat.explore/tests/testsEtoF.R0000644000176200001440000002661514611073330017007 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.explore #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.explore) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/envelopes.R # # Test validity of envelope data # # $Revision: 1.29 $ $Date: 2024/01/10 13:45:29 $ # local({ ## check envelope calls from 'alltypes' if(ALWAYS) a <- alltypes(demopat, Kcross, nsim=4, envelope=TRUE) if(FULLTEST) b <- alltypes(demopat, Kcross, nsim=4, envelope=TRUE, global=TRUE) ## check 'transform' idioms if(ALWAYS) A <- envelope(cells, Kest, nsim=4, transform=expression(. 
- .x)) if(FULLTEST) B <- envelope(cells, Kest, nsim=4, transform=expression(sqrt(./pi) - .x)) # check conditional simulation if(FULLTEST) { e1 <- envelope(cells, Kest, nsim=4, fix.n=TRUE) e2 <- envelope(amacrine, Kest, nsim=4, fix.n=TRUE) e3 <- envelope(amacrine, Kcross, nsim=4, fix.marks=TRUE) e4 <- envelope(finpines, Kest, nsim=4, fix.n=TRUE) # multiple columns of marks e5 <- envelope(finpines, Kest, nsim=4, fix.marks=TRUE) } ## check pooling of envelopes in global case E1 <- envelope(cells, Kest, nsim=5, savefuns=TRUE, global=TRUE) E2 <- envelope(cells, Kest, nsim=12, savefuns=TRUE, global=TRUE) p12 <- pool(E1, E2) p12 <- pool(E1, E2, savefuns=TRUE) if(FULLTEST) { F1 <- envelope(cells, Kest, nsim=5, savefuns=TRUE, savepatterns=TRUE, global=TRUE) F2 <- envelope(cells, Kest, nsim=12, savefuns=TRUE, savepatterns=TRUE, global=TRUE) p12 <- pool(F1, F2) p12 <- pool(F1, F2, savefuns=TRUE, savepatterns=TRUE) E1r <- envelope(cells, Kest, nsim=5, savefuns=TRUE, global=TRUE, ginterval=c(0.05, 0.15)) E2r <- envelope(cells, Kest, nsim=12, savefuns=TRUE, global=TRUE, ginterval=c(0.05, 0.15)) p12r <- pool(E1r, E2r) } if(FULLTEST) { #' as.data.frame.envelope Nsim <- 5 E <- envelope(cells, nsim=Nsim, savefuns=TRUE) A <- as.data.frame(E) B <- as.data.frame(E, simfuns=TRUE) stopifnot(ncol(B) - ncol(A) == Nsim) } if(FULLTEST) { #' cases not covered elsewhere A <- envelope(cells, nsim=5, alternative="less", do.pwrong=TRUE, use.theory=FALSE, savepatterns=TRUE, savefuns=TRUE) print(A) B <- envelope(A, nsim=5, savefuns=TRUE) D <- envelope(cells, "Lest", nsim=5) UU <- envelope(cells, nsim=5, foreignclass="ppp", clipdata=TRUE) AA <- envelope(cells, nsim=5, jsim=5, alternative="greater", global=TRUE) AA <- envelope(cells, nsim=5, jsim=5, alternative="less", global=TRUE) AA <- envelope(cells, nsim=5, jsim=5, alternative="greater", VARIANCE=TRUE) AA <- envelope(cells, nsim=5, jsim=5, alternative="greater", VARIANCE=TRUE) #' spotted by Art Stock - bugs in ratfv class support BB <- envelope(redwood, Kinhom, nsim=5, sigma=bw.scott, ratio=TRUE, correction="border") CC <- envelope(redwood, Kinhom, nsim=5, sigma=bw.scott, global=TRUE, ratio=TRUE, correction="border") DD <- envelope(redwood, Finhom, nsim=5, sigma=bw.scott, ratio=TRUE, correction="trans") EE <- envelope(redwood, Finhom, nsim=5, sigma=bw.scott, global=TRUE, ratio=TRUE, correction="trans") #' envelopes based on sample variance E <- envelope(cells, nsim=8, VARIANCE=TRUE) G <- envelope(cells, nsim=8, VARIANCE=TRUE, use.theory=FALSE, do.pwrong=TRUE) print(G) #' summary method summary(E) summary(envelope(cells, nsim=5, simulate=expression(runifpoint(42)))) #' weights argument H1 <- envelope(cells, nsim=4, weights=npoints, savefuns=TRUE) H2 <- envelope(cells, nsim=4, weights=npoints, savefuns=TRUE) J1 <- envelope(cells, nsim=4, weights=npoints, VARIANCE=TRUE) J2 <- envelope(cells, nsim=4, weights=npoints, VARIANCE=TRUE) #' pooling with weights H <- pool(H1, H2) J <- pool(J1, J2) #' pooling envelopes with non-identical attributes H0 <- envelope(cells, nsim=4, savefuns=TRUE) HH <- pool(H0, H1) #' malformed argument 'simulate' A <- replicate(3, list(list(runifpoint(ex=cells)))) # list(list(ppp), list(ppp), list(ppp)) E <- envelope(cells, simulate=A, nsim=3) #' undocumented/secret K <- envelope(cells, nsim=4, saveresultof=npoints, collectrubbish=TRUE) #' so secret I've even forgotten how to do it M <- envelope(cells, nsim=4, internal=list(eject="patterns")) } if(ALWAYS) { #' Test robustness of envelope() sorting procedure when NA's are present #' Fails with 
spatstat.utils 1.12-0 set.seed(42) EP <- envelope(longleaf, pcf, nsim=10, nrank=2) #' Test case when the maximum permitted number of failures is exceeded X <- amacrine[1:153] # contains exactly one point with mark='off' #' High probability of generating a pattern with no marks = 'off' E <- envelope(X, Kcross, nsim=39, maxnerr=2, maxerr.action="warn") A <- alltypes(X, Kcross, envelope=TRUE, nsim=39, maxnerr=2) } if(ALWAYS) { #' Internals: envelope.matrix Y <- matrix(rnorm(200), 10, 20) rr <- 1:10 oo <- rnorm(10) zz <- numeric(10) E <- envelope(Y, rvals=rr, observed=oo, nsim=10) E <- envelope(Y, rvals=rr, observed=oo, jsim=1:10) E <- envelope(Y, rvals=rr, observed=oo, theory=zz, type="global", use.theory=TRUE) E <- envelope(Y, rvals=rr, observed=oo, theory=zz, type="global", use.theory=TRUE, nsim=10) E <- envelope(Y, rvals=rr, observed=oo, theory=zz, type="global", use.theory=FALSE, nsim=10) E <- envelope(Y, rvals=rr, observed=oo, type="global", nsim=10, nsim2=10) E <- envelope(Y, rvals=rr, observed=oo, type="global", jsim=1:10, jsim.mean=11:20) if(FULLTEST) print(E) E <- envelope(Y, rvals=rr, observed=oo, type="global", nsim=10, jsim.mean=11:20) E <- envelope(Y, rvals=rr, observed=oo, type="global", jsim=1:10, nsim2=10) } if(ALWAYS) { #' quirk with handmade summary functions ('conserve' attribute) Kdif <- function(X, r=NULL) { # note no ellipsis Y <- split(X) K1 <- Kest(Y[[1]], r=r) K2 <- Kest(Y[[2]], r=r) D <- eval.fv(K1-K2) return(D) } envelope(amacrine, Kdif, nsim=3) } ## close 'local' }) # # tests/fastK.R # # check fast and slow code for Kest # and options not tested elsewhere # # $Revision: 1.5 $ $Date: 2020/04/28 12:58:26 $ # if(ALWAYS) { local({ ## fast code Kb <- Kest(cells, nlarge=0) Ku <- Kest(cells, correction="none") Kbu <- Kest(cells, correction=c("none", "border")) ## slow code, full set of corrections, sqrt transformation, ratios Ldd <- Lest(unmark(demopat), correction="all", var.approx=TRUE, ratio=TRUE) ## Lotwick-Silverman var approx (rectangular window) Loo <- Lest(cells, correction="all", var.approx=TRUE, ratio=TRUE) ## Code for large dataset nbig <- .Machine$integer.max if(!is.null(nbig)) { nn <- ceiling(sqrt(nbig)) if(nn < 1e6) Kbig <- Kest(runifpoint(nn), correction=c("border", "bord.modif", "none"), ratio=TRUE) } ## Kinhom lam <- density(cells, at="points", leaveoneout=TRUE) ## fast code Kib <- Kinhom(cells, lam, nlarge=0) Kiu <- Kest(cells, lam, correction="none") Kibu <- Kest(cells, lam, correction=c("none", "border")) ## slow code Lidd <- Linhom(unmark(demopat), sigma=bw.scott) }) } ## ## tests/fvproblems.R ## ## problems with fv, ratfv and fasp code ## ## $Revision: 1.15 $ $Date: 2020/04/28 12:58:26 $ #' This appears in the workshop notes #' Problem detected by Martin Bratschi if(FULLTEST) { local({ Jdif <- function(X, ..., i) { Jidot <- Jdot(X, ..., i=i) J <- Jest(X, ...) dif <- eval.fv(Jidot - J) return(dif) } Z <- Jdif(amacrine, i="on") }) } #' #' Test mathlegend code #' local({ K <- Kest(cells) if(FULLTEST) { plot(K) plot(K, . ~ r) plot(K, . - theo ~ r) } if(ALWAYS) { plot(K, sqrt(./pi) ~ r) } if(FULLTEST) { plot(K, cbind(iso, theo) ~ r) plot(K, cbind(iso, theo) - theo ~ r) plot(K, sqrt(cbind(iso, theo)/pi) ~ r) plot(K, cbind(iso/2, -theo) ~ r) plot(K, cbind(iso/2, trans/2) - theo ~ r) } if(FULLTEST) { ## test expansion of .x and .y plot(K, . ~ .x) plot(K, . 
- theo ~ .x) plot(K, .y - theo ~ .x) } if(ALWAYS) { plot(K, sqrt(.y) - sqrt(theo) ~ .x) } # problems with parsing weird strings in levels(marks(X)) # noted by Ulf Mehlig if(ALWAYS) { levels(marks(amacrine)) <- c("Nasticreechia krorluppia", "Homo habilis") plot(Kcross(amacrine)) plot(alltypes(amacrine, "K")) } if(FULLTEST) { plot(alltypes(amacrine, "J")) plot(alltypes(amacrine, pcfcross)) } }) #' #' Test quirks related to 'alim' attribute if(FULLTEST) { local({ K <- Kest(cells) attr(K, "alim") <- NULL plot(K) attr(K, "alim") <- c(0, 0.1) plot(tail(K)) }) } #' #' Check that default 'r' vector passes the test for fine spacing if(ALWAYS) { local({ a <- Fest(cells) A <- Fest(cells, r=a$r) b <- Hest(heather$coarse) B <- Hest(heather$coarse, r=b$r) # from Cenk Icos X <- runifpoint(100, owin(c(0,3), c(0,10))) FX <- Fest(X) FXr <- Fest(X, r=FX$r) JX <- Jest(X) }) } ##' various functionality in fv.R if(ALWAYS) { local({ M <- cbind(1:20, matrix(runif(100), 20, 5)) A <- as.fv(M) fvlabels(A) <- c("r","%s(r)", "%s[A](r)", "%s[B](r)", "%s[C](r)", "%s[D](r)") A <- rename.fv(A, "M", quote(M(r))) A <- tweak.fv.entry(A, "V1", new.tag="r") A[,3] <- NULL A$hogwash <- runif(nrow(A)) fvnames(A, ".") <- NULL #' bind.fv with qualitatively different functions GK <- harmonise(G=Gest(cells), K=Kest(cells)) G <- GK$G K <- GK$K ss <- c(rep(TRUE, nrow(K)-10), rep(FALSE, 10)) U <- bind.fv(G, K[ss, ], clip=TRUE) #' H <- rebadge.as.crossfun(K, "H", "inhom", 1, 2) H <- rebadge.as.dotfun(K, "H", "inhom", 3) #' text layout op <- options(width=27) print(K) options(width=18) print(K) options(op) #' collapse.fv Kb <- Kest(cells, correction="border") Ki <- Kest(cells, correction="isotropic") collapse.fv(Kb, Ki, same="theo") collapse.fv(anylist(B=Kb, I=Ki), same="theo") collapse.fv(anylist(B=Kb), I=Ki, same="theo") Xlist <- replicate(3, runifpoint(30), simplify=FALSE) Klist <- anylapply(Xlist, Kest) collapse.fv(Klist, same="theo", different=c("iso", "border")) names(Klist) <- LETTERS[24:26] collapse.fv(Klist, same="theo", different=c("iso", "border")) }) } if(FULLTEST) { local({ ## rat K <- Kest(cells, ratio=TRUE) G <- Gest(cells, ratio=TRUE) print(K) compatible(K, K) compatible(K, G) H <- rat(K, attr(K, "numerator"), attr(K, "denominator"), check=TRUE) }) } if(FULLTEST) { local({ ## bug in Jmulti.R colliding with breakpts.R B <- owin(c(0,3), c(0,10)) Y <- superimpose(A=runifpoint(1212, B), B=runifpoint(496, B)) JDX <- Jdot(Y) JCX <- Jcross(Y) Jdif <- function(X, ..., i) { Jidot <- Jdot(X, ..., i=i) J <- Jest(X, ...) dif <- eval.fv(Jidot - J) return(dif) } E <- envelope(Y, Jdif, nsim=19, i="A", simulate=expression(rlabel(Y))) }) } if(FULLTEST) { local({ #' fasp axes, title, dimnames a <- alltypes(amacrine) a$title <- NULL plot(a, samex=TRUE, samey=TRUE) dimnames(a) <- lapply(dimnames(a), toupper) b <- as.fv(a) }) } if(FULLTEST) { local({ ## plot.anylist (fv) b <- anylist(A=Kcross(amacrine), B=Kest(amacrine)) plot(b, equal.scales=TRUE, main=expression(sqrt(pi))) plot(b, arrange=FALSE) }) } spatstat.explore/tests/testsS.R0000644000176200001440000001376714611073330016360 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.explore #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.explore) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) #' tests/sdr.R #' #' $Revision: 1.2 $ $Date: 2020/05/01 09:59:59 $ if(FULLTEST) { local({ AN <- sdr(bei, bei.extra, method="NNIR") AV <- sdr(bei, bei.extra, method="SAVE") AI <- sdr(bei, bei.extra, method="SIR") AT <- sdr(bei, bei.extra, method="TSE") subspaceDistance(AN$B, AV$B) dimhat(AN$M) }) } ## ## tests/segments.R ## Tests of psp class and related code ## [SEE ALSO: tests/xysegment.R] ## ## $Revision: 1.33 $ $Date: 2022/05/22 08:39:47 $ local({ if(ALWAYS) { # C code #' tests of density.psp Y <- edges(letterR) Window(Y) <- grow.rectangle(Frame(Y), 0.4) YC <- density(Y, 0.2, method="C", edge=FALSE, dimyx=64) YI <- density(Y, 0.2, method="interpreted", edge=FALSE, dimyx=64) YF <- density(Y, 0.2, method="FFT", edge=FALSE, dimyx=64) xCI <- max(abs(YC/YI - 1)) xFI <- max(abs(YF/YI - 1)) cat(paste("xCI =", xCI, "\txFI =", signif(xFI, 5)), fill=TRUE) if(xCI > 0.01) stop(paste("density.psp C algorithm relative error =", xCI)) if(xFI > 0.1) stop(paste("density.psp FFT algorithm relative error =", xFI)) B <- square(0.3) density(Y, 0.2, at=B) density(Y, 0.2, at=B, edge=TRUE, method="C") Z <- runifrect(3, B) density(Y, 0.2, at=Z) density(Y, 0.2, at=Z, edge=TRUE, method="C") } if(FULLTEST) { #' segment clipping in window (bug found by Rolf) set.seed(42) X <- runifpoint(50, letterR) SP <- dirichletEdges(X) #' clip to polygonal window Window(X) <- as.mask(Window(X)) SM <- dirichletEdges(X) #' clip to mask window } if(FULLTEST) { #' test rshift.psp and append.psp with marks (Ute Hahn) m <- data.frame(A=1:10, B=letters[1:10]) g <- gl(3, 3, length=10) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=m) Y <- rshift(X, radius = 0.1) Y <- rshift(X, radius = 0.1, group=g) #' mark management b <- data.frame(A=1:10) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=b) stopifnot(is.data.frame(marks(X))) Y <- rshift(X, radius = 0.1) Y <- rshift(X, radius = 0.1, group=g) } }) # ## tests/sigtraceprogress.R # ## Tests of *.sigtrace and *.progress # ## $Revision: 1.5 $ $Date: 2020/05/01 09:59:59 $ if(FULLTEST) { local({ plot(dclf.sigtrace(redwood, nsim=19, alternative="greater", rmin=0.02, verbose=FALSE)) plot(dclf.progress(redwood, nsim=19, alternative="greater", rmin=0.02, verbose=FALSE)) plot(dg.sigtrace(redwood, nsim=5, alternative="greater", rmin=0.02, verbose=FALSE)) plot(dg.progress(redwood, nsim=5, alternative="greater", rmin=0.02, verbose=FALSE)) ## test 'leave-two-out' algorithm a <- dclf.sigtrace(redwood, Lest, nsim=9, use.theory=FALSE, leaveout=2, verbose=FALSE) aa <- dclf.progress(redwood, Lest, nsim=9, use.theory=FALSE, leaveout=2, verbose=FALSE) b <- dg.sigtrace(redwood, Lest, nsim=5, use.theory=FALSE, leaveout=2) bb <- dg.progress(redwood, Lest, nsim=5, use.theory=FALSE, leaveout=2, verbose=FALSE) ## other code blocks e <- mad.progress(redwood, nsim=5) e <- mad.progress(redwood, nsim=19, alpha=0.05) f <- dclf.progress(redwood, nsim=5, scale=function(x) x^2) f <- dclf.progress(redwood, nsim=5, normalize=TRUE, deflate=TRUE) g <- dg.progress(redwood, nsim=5, scale=function(x) x^2) g <- dg.progress(redwood, nsim=5, normalize=TRUE, deflate=TRUE) }) } #' #' tests/ssf.R #' #' Tests of 'ssf' class #' #' $Revision: 1.5 $ $Date: 2020/12/04 08:02:25 $ #' if(FULLTEST) { local({ Y <- 
cells[1:5] X <- rsyst(Window(Y), 5) Z <- runifpoint(3, Window(Y)) f1 <- ssf(X, nncross(X,Y,what="dist")) f2 <- ssf(X, nncross(X,Y,what="dist", k=1:2)) image(f1) g1 <- as.function(f1) g1(Z) g2 <- as.function(f2) g2(Z) plot(f1, style="contour") plot(f1, style="imagecontour") contour(f1) apply.ssf(f2, 1, sum) range(f1) min(f1) max(f1) integral(f1, weights=tile.areas(dirichlet(X))) }) } #' #' tests/sumfun.R #' #' Tests of code for summary functions #' #' $Revision: 1.9 $ $Date: 2022/05/22 08:45:23 $ if(ALWAYS) { # involves C code local({ W <- owin(c(0,1), c(-1/2, 0)) Gr <- Gest(redwood, correction="all",domain=W) Fr <- Fest(redwood, correction="all",domain=W) Jr <- Jest(redwood, correction="all",domain=W) F0 <- Fest(redwood[FALSE], correction="all") Fh <- Fest(humberside, domain=erosion(Window(humberside), 100)) FIr <- Finhom(redwood, savelambda=TRUE, ratio=TRUE) JIr <- Jinhom(redwood, savelambda=TRUE, ratio=TRUE) Ga <- Gcross(amacrine, correction="all") Ia <- Iest(amacrine, correction="all") lam <- intensity(amacrine) lmin <- 0.9 * min(lam) nJ <- sum(marks(amacrine) == "off") FM <- FmultiInhom(amacrine, marks(amacrine) == "off", lambdaJ=rep(lam["off"], nJ), lambdamin = lmin) GM <- GmultiInhom(amacrine, marks(amacrine) == "on", marks(amacrine) == "off", lambda=lam[marks(amacrine)], lambdamin=lmin, ReferenceMeasureMarkSetI=42) a <- compileCDF(D=nndist(redwood), B=bdist.points(redwood), r=seq(0, 1, length=256)) #' Tstat (triplet) function, all code blocks a <- Tstat(redwood, ratio=TRUE, correction=c("none", "border", "bord.modif", "translate")) ## distance argument spacing and breakpoints e <- check.finespacing(c(0,1,2), eps=0.1, action="silent") b <- as.breakpts(pi, 20) b <- as.breakpts(42, max=pi, npos=20) b <- even.breaks.owin(letterR) }) } spatstat.explore/tests/funky.tab0000644000176200001440000000231514611073330016557 0ustar liggesusers"x" "y" "marks" 6.25106167113559 2.81410158672916 "A" 8.11380534821364 5.21282932747038 "A" 8.71254438727444 4.34662208775827 "A" 9.44433654612653 3.81357147870467 "A" 10.9744474237264 4.74641004454848 "A" 11.9058192622654 5.34609197973378 "A" 14.7664613377781 8.34450165566032 "A" 18.159315892456 10.8764920486649 "A" 22.2839626059859 12.4090125496941 "A" 29.8679904340894 14.3413210075134 "A" 16.2300456554823 8.21123900339692 "A" 15.2321472570476 8.07797635113351 "A" 15.3652003768389 9.07744624310903 "A" 5.65232263207479 2.28105097767555 "B" 8.37991158779622 5.67924861039229 "B" 8.71254438727444 4.27999076162657 "B" 9.04517718675266 3.88020280483637 "B" 10.3091818247699 4.54651606615337 "B" 11.040973983622 5.87914258878739 "B" 11.4401333429959 5.27946065360208 "B" 14.8995144575694 7.87808237273841 "B" 18.4919486919342 10.4767040918747 "B" 18.8245814914124 11.0097547009283 "B" 20.0885861294297 10.2101787873479 "B" 20.2881658091166 10.3434414396113 "B" 26.4751358794115 13.0086944848794 "B" 27.4730342778462 13.541745093933 "B" 32.4625262700196 13.4751137678013 "B" 32.4625262700196 15.0076342688304 "B" 33.6600043481412 14.8077402904353 "B" 35.7223277049061 15.6073162040157 "B" 24.8119718820204 12.0092245929039 "B" spatstat.explore/tests/testsQ.R0000644000176200001440000000066214611073330016344 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.explore #' Obtain environment variable controlling tests. 
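#'
#' The whitespace-delimited table 'funky.tab' above is point pattern data
#' used by the relrisk() underflow test in testsD.R. A minimal sketch of
#' reading it back (assuming the working directory is this tests/ folder):
#'
#'   funky <- scanpp("funky.tab", owin(c(4, 38), c(0.3, 17)))
#'   P <- relrisk(funky, 0.5)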
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.explore) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) spatstat.explore/tests/testsAtoC.R0000644000176200001440000000654114611073330016774 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.explore #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.explore) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) ## tests/cdf.test.R local({ NSIM <- 9 op <- spatstat.options(ndummy.min=16, npixel=32) AA <- split(ants, un=FALSE) AC <- AA[["Cataglyphis"]] AM <- AA[["Messor"]] DM <- distmap(AM) if(ALWAYS) { ## Check cdf.test with strange data ## Marked point patterns with some marks not represented ## should produce a warning, rather than a crash: cdf.test(AC, DM) } if(FULLTEST) { ## should be OK: cdf.test(unmark(AC), DM) cdf.test(unmark(AC), DM, "cvm") cdf.test(unmark(AC), DM, "ad") ## other code blocks cdf.test(finpines, "x") } }) #' tests/circular.R #' #' Circular data and periodic distributions #' #' $Revision: 1.4 $ $Date: 2020/04/28 12:58:26 $ local({ if(ALWAYS) { a <- pairorient(redwood, 0.05, 0.15, correction="none") rose(a) } if(FULLTEST) { b <- pairorient(redwood, 0.05, 0.15, correction="best") rose(b, start="N", clockwise=TRUE) } if(ALWAYS) { #' arcs on the circle #' (depends on numerical behaviour) set.seed(19171025) aa <- replicate(7, runif(1, 0, 2*pi) + c(0, runif(1, 0, pi)), simplify=FALSE) bb <- circunion(aa) assertsingle <- function(x, a, id) { y <- circunion(x) if(length(y) != 1 || max(abs(y[[1]] - a)) > .Machine$double.eps) stop(paste("Incorrect result from circunion in case", id), call.=FALSE) invisible(NULL) } assertsingle(list(c(pi/3, pi), c(pi/2, 3*pi/2)), c(pi/3, 3*pi/2), 1) assertsingle(list(c(0, pi/2), c(pi/4, pi)), c(0,pi), 2) assertsingle(list(c(-pi/4, pi/2), c(pi/4, pi)), c((2-1/4)*pi, pi), 3) } }) #' #' tests/closecore.R #' #' check 'closepairs/crosspairs' code #' invoked in core package #' #' $Revision: 1.4 $ $Date: 2021/04/17 04:16:43 $ #' #' ------- All this code must be run on every hardware ------- #' local({ #' weightedclosepairs is in wtdclosepair.R wi <- weightedclosepairs(redwood, 0.05, "isotropic") if(FULLTEST) { wt <- weightedclosepairs(redwood, 0.05, "translate") wp <- weightedclosepairs(redwood, 0.05, "periodic") } #' markmarkscatter uses closepairs.pp3 X <- runifpoint3(100) marks(X) <- runif(100) markmarkscatter(X, 0.2) if(FULLTEST) { markmarkscatter(X[FALSE], 0.2) } }) #' #' contact.R #' #' Check machinery for first contact distributions #' #' $Revision: 1.8 $ $Date: 2021/04/17 02:25:55 $ local({ if(ALWAYS) { #' reduce complexity Y <- as.mask(heather$coarse, dimyx=c(50, 25)) X <- runifpoint(100, win = complement.owin(Y)) if(FULLTEST) G <- Gfox(X, Y) J <- Jfox(X, Y) Y <- as.polygonal(Y) X <- runifpoint(100, win = complement.owin(Y)) if(FULLTEST) G <- Gfox(X, Y) J <- Jfox(X, Y) op <- spatstat.options(exactdt.checks.data=TRUE) U <- exactdt(X) spatstat.options(op) } }) reset.spatstat.options() spatstat.explore/tests/testsUtoZ.R0000644000176200001440000000066214611073330017045 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.explore #' Obtain environment 
variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.explore) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) spatstat.explore/tests/testsL.R0000644000176200001440000000121114611073330016326 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.explore #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.explore) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) ## ## tests/localpcf.R ## ## temporary test file for localpcfmatrix ## $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $ local({ a <- localpcfmatrix(redwood) if(FULLTEST) { a plot(a) a[, 3:5] } }) spatstat.explore/tests/testsGtoJ.R0000644000176200001440000000623114611073330017005 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.explore #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.explore) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) ## ## tests/gcc323.R ## ## $Revision: 1.3 $ $Date: 2020/04/28 12:58:26 $ ## if(ALWAYS) { # depends on hardware local({ # critical R values that provoke GCC bug #323 a <- marktable(lansing, R=0.25) a <- marktable(lansing, R=0.21) a <- marktable(lansing, R=0.20) a <- marktable(lansing, R=0.10) }) } #' tests/hypotests.R #' Hypothesis tests #' #' $Revision: 1.10 $ $Date: 2023/07/17 07:30:48 $ if(FULLTEST) { local({ hopskel.test(redwood, method="MonteCarlo", nsim=5) #' quadrat test - spatial methods a <- quadrat.test(redwood, 3) domain(a) shift(a, c(1,1)) #' quadrat test - correctness of mapping from table to quadrats Q2 <- quadratcount(humberside, 2, 3) T2 <- suppressWarnings(quadrat.test(Q2)) R2 <- cbind(as.numeric(t(Q2)), round(10 * residuals(T2))) R2correct <- cbind(c(2, 20, 13, 11, 34, 123), c(-46, -12, -62, -41, 50, 134)) if(!all(R2 == R2correct)) stop("Incorrect count-residual map for quadrat.test(2,3)") Q5 <- quadratcount(humberside, 5, 3) T5 <- suppressWarnings(quadrat.test(Q5)) R5 <- cbind(as.numeric(t(Q5)), round(10 * residuals(T5))) R5correct <- cbind( c( 0, 0, 3, 19, 3, 2, 14, 5, 0, 2, 117, 35, 3), c(-19, -33, -42, 16, -37, -49, -28, -35, -5, -21, 295, 40, -32)) if(!all(R5 == R5correct)) stop("Incorrect count-residual map for quadrat.test(5,3)") #' cases of studpermu.test #' X is a hyperframe b <- studpermu.test(pyramidal, nperm=9) b <- studpermu.test(pyramidal, nperm=9, use.Tbar=TRUE) #' X is a list of lists of ppp ZZ <- split(pyramidal$Neurons, pyramidal$group) bb <- studpermu.test(ZZ, nperm=9) #' Issue #115 X <- runifpoint(50, nsim = 3) Y <- runifpoint(3000, nsim = 3) h <- hyperframe(ppp = c(X, Y), group = rep(1:2, 3)) studpermu.test(h, ppp ~ group) #' scan test Z <- scanmeasure(cells, 0.1, method="fft") rr <- c(0.05, 1) scan.test(amacrine, rr, nsim=5, method="binomial", alternative="less") }) } # # tests/imageops.R # # $Revision: 1.43 $ $Date: 2023/08/29 01:03:59 $ # if(FULLTEST) { local({ #' case of "[.im" and "[<-.im" where index is an ssf d <- distmap(cells, dimyx=32) Empty <- cells[FALSE] EmptyFun <- 
ssf(Empty, numeric(0)) ff <- d[EmptyFun] d[EmptyFun] <- 42 #' Smooth.im -> blur.im with sigma=NULL Z <- as.im(function(x,y) { x - y }, letterR, dimyx=32) ZS <- Smooth(Z) #' deprecated -> im.apply(DA, which.max) Z <- which.max.im(bei.extra) #' rotmean U <- rotmean(Z, origin="midpoint", result="im", padzero=FALSE) #' cases of distcdf distcdf(cells[1:5]) distcdf(W=cells[1:5], dW=1:5) distcdf(W=Window(cells), V=cells[1:5]) distcdf(W=Window(cells), V=cells[1:5], dV=1:5) }) } spatstat.explore/tests/testsD.R0000644000176200001440000004333614611073330016334 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.explore #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.explore) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) #' #' tests/density.R #' #' Test behaviour of density() methods, #' relrisk(), Smooth() #' and inhomogeneous summary functions #' and idw, adaptive.density, intensity #' and SpatialMedian, SpatialQuantile #' #' $Revision: 1.67 $ $Date: 2024/01/29 07:07:16 $ #' if(!FULLTEST) spatstat.options(npixel=32, ndummy.min=16) local({ # test all cases of density.ppp and densityfun.ppp tryit <- function(..., do.fun=TRUE, badones=FALSE) { Z <- density(cells, ..., at="pixels") Z <- density(cells, ..., at="points") if(do.fun) { f <- densityfun(cells, ...) U <- f(0.1, 0.3) if(badones) { U2 <- f(1.1, 0.3) U3 <- f(1.1, 0.3, drop=FALSE) } } return(invisible(NULL)) } if(ALWAYS) { tryit(0.05) tryit(0.05, diggle=TRUE) tryit(0.05, se=TRUE) tryit(0.05, weights=expression(x)) tryit(0.07, kernel="epa") tryit(sigma=Inf) tryit(0.05, badones=TRUE) } if(FULLTEST) { tryit(0.07, kernel="quartic") tryit(0.07, kernel="disc") tryit(0.07, kernel="epa", weights=expression(x)) tryit(sigma=Inf, weights=expression(x)) } V <- diag(c(0.05^2, 0.07^2)) if(ALWAYS) { tryit(varcov=V) } if(FULLTEST) { tryit(varcov=V, diggle=TRUE) tryit(varcov=V, weights=expression(x)) tryit(varcov=V, weights=expression(x), diggle=TRUE) Z <- distmap(runifpoint(5, Window(cells))) tryit(0.05, weights=Z) tryit(0.05, weights=Z, diggle=TRUE) } trymost <- function(...) 
tryit(..., do.fun=FALSE) wdf <- data.frame(a=1:42,b=42:1) if(ALWAYS) { trymost(0.05, weights=wdf) trymost(sigma=Inf, weights=wdf) } if(FULLTEST) { trymost(0.05, weights=wdf, diggle=TRUE) trymost(varcov=V, weights=wdf) trymost(varcov=V, weights=expression(cbind(x,y))) } ## check conservation of mass checkconserve <- function(X, xname, sigma, toler=0.01) { veritas <- npoints(X) vino <- integral(density(X, sigma, diggle=TRUE)) relerr <- abs(vino - veritas)/veritas if(relerr > toler) stop(paste("density.ppp(diggle=TRUE) fails to conserve mass:", vino, "!=", veritas, "for", sQuote(xname)), call.=FALSE) return(relerr) } if(FULLTEST) { checkconserve(cells, "cells", 0.15) } if(ALWAYS) { checkconserve(split(chorley)[["lung"]], "lung", 2) } ## run C algorithm 'denspt' opa <- spatstat.options(densityC=TRUE, densityTransform=FALSE) if(ALWAYS) { tryit(varcov=V) } if(FULLTEST) { tryit(varcov=V, weights=expression(x)) trymost(varcov=V, weights=wdf) } spatstat.options(opa) crossit <- function(..., sigma=NULL) { U <- runifpoint(20, Window(cells)) a <- densitycrossEngine(cells, U, ..., sigma=sigma) a <- densitycrossEngine(cells, U, ..., sigma=sigma, diggle=TRUE) invisible(NULL) } if(ALWAYS) { crossit(varcov=V, weights=cells$x) crossit(sigma=Inf) } if(FULLTEST) { crossit(varcov=V, weights=wdf) crossit(sigma=0.1, weights=wdf) crossit(sigma=0.1, kernel="epa", weights=wdf) } ## apply different discretisation rules if(ALWAYS) { Z <- density(cells, 0.05, fractional=TRUE) } if(FULLTEST) { Z <- density(cells, 0.05, preserve=TRUE) Z <- density(cells, 0.05, fractional=TRUE, preserve=TRUE) } ## compare results with different algorithms crosscheque <- function(expr) { e <- as.expression(substitute(expr)) ename <- sQuote(deparse(substitute(expr))) ## interpreted R opa <- spatstat.options(densityC=FALSE, densityTransform=FALSE) val.interpreted <- eval(e) ## established C algorithm 'denspt' spatstat.options(densityC=TRUE, densityTransform=FALSE) val.C <- eval(e) ## new C algorithm 'Gdenspt' using transformed coordinates spatstat.options(densityC=TRUE, densityTransform=TRUE) val.Transform <- eval(e) spatstat.options(opa) if(max(abs(val.interpreted - val.C)) > 0.001) stop(paste("Numerical discrepancy between R and C algorithms in", ename)) if(max(abs(val.C - val.Transform)) > 0.001) stop(paste("Numerical discrepancy between C algorithms", "using transformed and untransformed coordinates in", ename)) invisible(NULL) } ## execute & compare results of density(at="points") with different algorithms wdfr <- cbind(1:npoints(redwood), 2) if(ALWAYS) { crosscheque(density(redwood, at="points", sigma=0.13, edge=FALSE)) crosscheque(density(redwood, at="points", sigma=0.13, edge=FALSE, weights=wdfr[,1])) crosscheque(density(redwood, at="points", sigma=0.13, edge=FALSE, weights=wdfr)) } ## correctness of non-Gaussian kernel calculation leavein <- function(ker, maxd=0.025) { ZI <- density(redwood, 0.12, kernel=ker, edge=FALSE, dimyx=256)[redwood] ZP <- density(redwood, 0.12, kernel=ker, edge=FALSE, at="points", leaveoneout=FALSE) discrep <- max(abs(ZP - ZI))/npoints(redwood) if(discrep > maxd) stop(paste("Discrepancy", signif(discrep, 3), "in calculation for", ker, "kernel")) return(invisible(NULL)) } if(ALWAYS) { leavein("epanechnikov", 0.015) } if(FULLTEST) { leavein("quartic", 0.010) leavein("disc", 0.100) } ## bandwidth selection code blocks sigvec <- 0.01 * 2:15 sigran <- range(sigvec) if(ALWAYS) { bw.ppl(redwood, sigma=sigvec) bw.CvL(redwood, sigma=sigvec) } if(FULLTEST) { bw.ppl(redwood, srange=sigran, ns=5) bw.CvL(redwood, 
srange=sigran, ns=5) } ## adaptive bandwidth if(ALWAYS) { a <- bw.abram(redwood) } if(FULLTEST) { a <- bw.abram(redwood, pilot=density(redwood, 0.2)) a <- bw.abram(redwood, smoother="densityVoronoi", at="pixels") } ## Kinhom if(ALWAYS) { lam <- density(redwood) K <- Kinhom(redwood, lam) lamX <- density(redwood, at="points") KX <- Kinhom(redwood, lamX) } ## test all code cases of new 'relrisk.ppp' algorithm pants <- function(..., X=ants, sigma=100, se=TRUE) { a <- relrisk(X, sigma=sigma, se=se, ...) return(TRUE) } if(ALWAYS) { pants() pants(diggle=TRUE) pants(edge=FALSE) pants(at="points") pants(casecontrol=FALSE) pants(relative=TRUE) pants(sigma=Inf) pants(sigma=NULL, varcov=diag(c(100,100)^2)) f <- 1/area(Window(ants)) pants(fudge=f) } if(FULLTEST) { pants(diggle=TRUE, at="points") pants(edge=FALSE, at="points", fudge=f) pants(casecontrol=FALSE, relative=TRUE) pants(casecontrol=FALSE,at="points") pants(relative=TRUE,at="points", fudge=f) pants(casecontrol=FALSE, relative=TRUE,at="points") pants(relative=TRUE, control="Cataglyphis", case="Messor", fudge=f) pants(relative=TRUE, control="Cataglyphis", case="Messor", at="points") pants(casecontrol=FALSE, case="Messor", se=FALSE) pants(case=2, at="pixels", relative=TRUE) pants(case=2, at="points", relative=TRUE) pants(case=2, at="pixels", relative=FALSE) pants(case=2, at="points", relative=FALSE) } if(ALWAYS) { ## underflow example from stackoverflow! funky <- scanpp("funky.tab", owin(c(4, 38), c(0.3, 17))) P <- relrisk(funky, 0.5) R <- relrisk(funky, 0.5, relative=TRUE) } ## more than 2 types if(ALWAYS) { pants(X=sporophores) pants(X=sporophores, sigma=20, at="points") pants(X=sporophores, sigma=20, at="points", fudge=f) bw.relrisk(sporophores, method="leastsquares") } if(FULLTEST) { pants(X=sporophores, sigma=20, relative=TRUE, at="points", fudge=f) pants(X=sporophores, sigma=20, at="pixels", se=FALSE) pants(X=sporophores, sigma=20, relative=TRUE, at="pixels", se=FALSE) bw.relrisk(sporophores, method="weightedleastsquares") } ## execute Smooth.ppp and Smoothfun.ppp in all cases stroke <- function(..., Y = longleaf, FUN=TRUE) { Z <- Smooth(Y, ..., at="pixels") Z <- Smooth(Y, ..., at="points", leaveoneout=TRUE) Z <- Smooth(Y, ..., at="points", leaveoneout=FALSE) if(FUN) { f <- Smoothfun(Y, ...) 
f(120, 80) f(Y[1:2]) f(Y[FALSE]) U <- as.im(f) } return(invisible(NULL)) } if(ALWAYS) { stroke() stroke(5, diggle=TRUE) stroke(5, geometric=TRUE) stroke(1e-6) # generates warning about small bandwidth stroke(5, weights=expression(x)) stroke(5, kernel="epa") stroke(sigma=Inf) stroke(varcov1=diag(c(1,1))) # 'anisotropic' code } if(FULLTEST) { Z <- as.im(function(x,y){abs(x)+1}, Window(longleaf)) stroke(5, weights=Z) stroke(5, weights=runif(npoints(longleaf))) stroke(varcov=diag(c(25, 36))) stroke(varcov=diag(c(25, 36)), weights=runif(npoints(longleaf))) stroke(5, Y=longleaf %mark% 1) stroke(5, Y=cut(longleaf,breaks=3)) stroke(5, weights=Z, geometric=TRUE) g <- function(x,y) { dnorm(x, sd=10) * dnorm(y, sd=10) } stroke(kernel=g, cutoff=30, FUN=FALSE) stroke(kernel=g, cutoff=30, scalekernel=TRUE, sigma=1, FUN=FALSE) } if(FULLTEST) { ## standard errors - single column of marks stroke(sigma=5, se=TRUE) stroke(sigma=5, se=TRUE, loctype="f") w <- runif(npoints(longleaf)) stroke(sigma=5, se=TRUE, weights=w, loctype="r", wtype="i") stroke(sigma=5, se=TRUE, weights=w, loctype="r", wtype="m") stroke(sigma=5, se=TRUE, weights=w, loctype="f", wtype="i") stroke(sigma=5, se=TRUE, weights=w, loctype="f", wtype="m") } niets <- markmean(longleaf, 9) strike <- function(..., Y=finpines) { Z <- Smooth(Y, ..., at="pixels") Z <- Smooth(Y, ..., at="points", leaveoneout=TRUE) Z <- Smooth(Y, ..., at="points", leaveoneout=FALSE) f <- Smoothfun(Y, ...) f(4, 1) f(Y[1:2]) f(Y[FALSE]) U <- as.im(f) return(invisible(NULL)) } if(ALWAYS) { strike() strike(sigma=1.5, kernel="epa") strike(varcov=diag(c(1.2, 2.1))) strike(sigma=1e-6) strike(sigma=Inf) } if(FULLTEST) { strike(sigma=1e-6, kernel="epa") strike(1.5, weights=runif(npoints(finpines))) strike(1.5, weights=expression(y)) strike(1.5, geometric=TRUE) strike(1.5, Y=finpines[FALSE]) flatfin <- finpines %mark% data.frame(a=rep(1, npoints(finpines)), b=2) strike(1.5, Y=flatfin) strike(1.5, Y=flatfin, geometric=TRUE) } if(FULLTEST) { ## standard errors - multivariate marks strike(sigma=1.5, se=TRUE) strike(sigma=1.5, se=TRUE, loctype="f") w <- runif(npoints(finpines)) strike(sigma=1.5, se=TRUE, weights=w, loctype="r", wtype="i") strike(sigma=1.5, se=TRUE, weights=w, loctype="r", wtype="m") strike(sigma=1.5, se=TRUE, weights=w, loctype="f", wtype="i") strike(sigma=1.5, se=TRUE, weights=w, loctype="f", wtype="m") } opx <- spatstat.options(densityTransform=FALSE) if(ALWAYS) { stroke(5, Y=longleaf[order(longleaf$x)], sorted=TRUE) } if(FULLTEST) { strike(1.5, Y=finpines[order(finpines$x)], sorted=TRUE) } spatstat.options(opx) ## detect special cases if(ALWAYS) { Smooth(longleaf[FALSE]) Smooth(longleaf, minnndist(longleaf)) Xconst <- cells %mark% 1 Smooth(Xconst, 0.1) Smooth(Xconst, 0.1, at="points") Smooth(cells %mark% runif(42), sigma=Inf) Smooth(cells %mark% runif(42), sigma=Inf, at="points") Smooth(cells %mark% runif(42), sigma=Inf, at="points", leaveoneout=FALSE) Smooth(cut(longleaf, breaks=4)) } ## code not otherwise reached if(ALWAYS) { smoothpointsEngine(cells, values=rep(1, npoints(cells)), sigma=0.2) } if(FULLTEST) { smoothpointsEngine(cells, values=runif(npoints(cells)), sigma=Inf) smoothpointsEngine(cells, values=runif(npoints(cells)), sigma=1e-16) } ## validity of Smooth.ppp(at='points') Y <- longleaf %mark% runif(npoints(longleaf), min=41, max=43) Z <- Smooth(Y, 5, at="points", leaveoneout=TRUE) rZ <- range(Z) if(rZ[1] < 40 || rZ[2] > 44) stop("Implausible results from Smooth.ppp(at=points, leaveoneout=TRUE)") Z <- Smooth(Y, 5, at="points", leaveoneout=FALSE) rZ <- 
range(Z) if(rZ[1] < 40 || rZ[2] > 44) stop("Implausible results from Smooth.ppp(at=points, leaveoneout=FALSE)") ## compare Smooth.ppp results with different algorithms if(ALWAYS) { crosscheque(Smooth(longleaf, at="points", sigma=6)) wt <- runif(npoints(longleaf)) crosscheque(Smooth(longleaf, at="points", sigma=6, weights=wt)) } if(FULLTEST) { vc <- diag(c(25,36)) crosscheque(Smooth(longleaf, at="points", varcov=vc)) crosscheque(Smooth(longleaf, at="points", varcov=vc, weights=wt)) } ## drop-dimension coding errors if(FULLTEST) { X <- longleaf marks(X) <- cbind(marks(X), 1) Z <- Smooth(X, 5) ZZ <- bw.smoothppp(finpines, hmin=0.01, hmax=0.012, nh=2) # reshaping problem } ## geometric-mean smoothing if(ALWAYS) { U <- Smooth(longleaf, 5, geometric=TRUE) } if(FULLTEST) { UU <- Smooth(X, 5, geometric=TRUE) V <- Smooth(longleaf, 5, geometric=TRUE, at="points") VV <- Smooth(X, 5, geometric=TRUE, at="points") } if(FULLTEST) { ## isotropic and anisotropic cases of bw.smoothppp bi <- bw.smoothppp(longleaf) ba <- bw.smoothppp(longleaf, varcov1=diag(c(1,1))) ## should be equal if(abs(bi-ba) > 0.001) stop(paste("Inconsistency in bw.smoothppp: isotropic =", bi, "!=", ba, "= anisotropic")) } }) reset.spatstat.options() local({ if(ALWAYS) { #' Kmeasure, second.moment.engine #' Expansion of window Zno <- Kmeasure(redwood, sigma=0.2, expand=FALSE) Zyes <- Kmeasure(redwood, sigma=0.2, expand=TRUE) #' All code blocks sigmadouble <- rep(0.1, 2) diagmat <- diag(sigmadouble^2) generalmat <- matrix(c(1, 0.5, 0.5, 1)/100, 2, 2) Z <- Kmeasure(redwood, sigma=sigmadouble) Z <- Kmeasure(redwood, varcov=diagmat) Z <- Kmeasure(redwood, varcov=generalmat) A <- second.moment.calc(redwood, 0.1, what="all", debug=TRUE) B <- second.moment.calc(redwood, varcov=diagmat, what="all") B <- second.moment.calc(redwood, varcov=diagmat, what="all") D <- second.moment.calc(redwood, varcov=generalmat, what="all") PR <- pixellate(redwood) DRno <- second.moment.calc(PR, 0.2, debug=TRUE, expand=FALSE, npts=npoints(redwood), obswin=Window(redwood)) DRyes <- second.moment.calc(PR, 0.2, debug=TRUE, expand=TRUE, npts=npoints(redwood), obswin=Window(redwood)) DR2 <- second.moment.calc(solist(PR, PR), 0.2, debug=TRUE, expand=TRUE, npts=npoints(redwood), obswin=Window(redwood)) Gmat <- generalmat * 100 isoGauss <- function(x,y) {dnorm(x) * dnorm(y)} ee <- evaluate2Dkernel(isoGauss, runif(10), runif(10), varcov=Gmat, scalekernel=TRUE) isoGaussIm <- as.im(isoGauss, square(c(-3,3))) gg <- evaluate2Dkernel(isoGaussIm, runif(10), runif(10), varcov=Gmat, scalekernel=TRUE) ## experimental code op <- spatstat.options(developer=TRUE) DR <- density(redwood, 0.1) spatstat.options(op) } }) local({ if(FULLTEST) { #' bandwidth selection op <- spatstat.options(n.bandwidth=8) bw.diggle(cells) bw.diggle(cells, method="interpreted") # undocumented test ## bw.relrisk(urkiola, hmax=20) is tested in man/bw.relrisk.Rd bw.relrisk(urkiola, hmax=20, method="leastsquares") bw.relrisk(urkiola, hmax=20, method="weightedleastsquares") ZX <- density(swedishpines, at="points") bw.pcf(swedishpines, lambda=ZX) bw.pcf(swedishpines, lambda=ZX, bias.correct=FALSE, simple=FALSE, cv.method="leastSQ") spatstat.options(op) } }) local({ if(FULLTEST) { ## idw Z <- idw(longleaf, power=4) Z <- idw(longleaf, power=4, se=TRUE) ZX <- idw(longleaf, power=4, at="points") ZX <- idw(longleaf, power=4, at="points", se=TRUE) } if(ALWAYS) { ## former bug in densityVoronoi.ppp X <- redwood[1:2] A <- densityVoronoi(X, f=0.51, counting=FALSE, fixed=FALSE, nrep=50, verbose=FALSE) ## dodgy code blocks in 
densityVoronoi.R A <- adaptive.density(nztrees, nrep=2, f=0.5, counting=TRUE) B <- adaptive.density(nztrees, nrep=2, f=0.5, counting=TRUE, fixed=TRUE) D <- adaptive.density(nztrees, nrep=2, f=0.5, counting=FALSE) E <- adaptive.density(nztrees, nrep=2, f=0.5, counting=FALSE, fixed=TRUE) } if(FULLTEST) { #' adaptive kernel estimation d10 <- nndist(nztrees, k=10) d10fun <- distfun(nztrees, k=10) d10im <- as.im(d10fun) uN <- 2 * runif(npoints(nztrees)) AA <- densityAdaptiveKernel(nztrees, bw=d10) BB <- densityAdaptiveKernel(nztrees, bw=d10, weights=uN) DD <- densityAdaptiveKernel(nztrees, bw=d10fun, weights=uN) EE <- densityAdaptiveKernel(nztrees, bw=d10im, weights=uN) } }) local({ if(FULLTEST) { ## cases of 'intensity' etc a <- intensity(amacrine, weights=expression(x)) SA <- split(amacrine) a <- intensity(SA, weights=expression(x)) a <- intensity(SA, weights=amacrine$x) ## check infrastructure for 'densityfun' f <- densityfun(cells, 0.05) Z <- as.im(f) Z <- as.im(f, W=square(0.5)) } }) local({ if(FULLTEST) { ## other cases of SpatialQuantile.ppp X <- longleaf marks(X) <- round(marks(X), -1) Z <- SpatialMedian(X, 30, type=4) ZX <- SpatialMedian(X, 30, type=4, at="points") ZXP <- SpatialMedian(X, 30, at="points", leaveoneout=FALSE) } }) reset.spatstat.options() spatstat.explore/tests/testsP1.R0000644000176200001440000000112714611073330016421 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.explore #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.explore) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) ## ## tests/pixelgripes.R ## Problems related to pixellation of windows ## ## $Revision: 1.8 $ $Date: 2022/10/23 06:21:10 $ if(FULLTEST) { local({ }) } spatstat.explore/tests/testsT.R0000644000176200001440000000375614611073330016356 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.explore #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.explore) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) #' #' tests/threedee.R #' #' Tests of 3D code #' #' $Revision: 1.8 $ $Date: 2020/05/02 01:32:58 $ #' local({ X <- runifpoint3(30) Y <- runifpoint3(20) if(FULLTEST) { A <- runifpoint3(10, nsim=2) Z <- ppsubset(X, 2:4) } ## if(ALWAYS) { # includes C code d <- pairdist(X, periodic=TRUE, squared=TRUE) d <- crossdist(X, Y, squared=TRUE) d <- crossdist(X, Y, squared=TRUE, periodic=TRUE) #' h <- has.close(X, 0.2) h <- has.close(X, 0.2, periodic=TRUE) h <- has.close(X, 0.2, Y=Y) h <- has.close(X, 0.2, Y=Y, periodic=TRUE) #' code blocks not otherwise reached rmax <- 0.6 * max(nndist(X)) g <- G3est(X, rmax=rmax, correction="rs") g <- G3est(X, rmax=rmax, correction="km") g <- G3est(X, rmax=rmax, correction="Hanisch") g <- G3est(X, rmax=rmax, sphere="ideal") g <- G3est(X, rmax=rmax, sphere="digital") v <- sphere.volume() v <- digital.volume() #' older code co <- coords(X) xx <- co$x yy <- co$y zz <- co$z gg1 <- g3engine(xx, yy, zz, correction="Hanisch G3") gg2 <- g3engine(xx, yy, zz, correction="minus sampling") ff1 <- f3engine(xx, yy, zz, correction="no") ff2 <- f3engine(xx, yy, zz, correction="minus sampling") } ## if(ALWAYS) { #'class support X <- runifpoint3(10) print(X) print(X %mark% runif(10)) print(X %mark% factor(letters[c(1:5,5:1)])) print(X %mark% data.frame(a=1:10, b=runif(10))) da <- as.Date(paste0("2020-01-0", c(1:5,5:1))) print(X %mark% da) print(X %mark% data.frame(a=1:10, b=da)) } }) spatstat.explore/tests/testsNtoO.R0000644000176200001440000000216114611073330017017 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.explore #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.explore) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/nnstat.R # # Check code that uses nndist/nnwhich # # nnorient() # stienen() # # $Revision: 1.1 $ $Date: 2020/12/04 03:45:44 $ # local({ if(FULLTEST) { #' test nnorient nnorient(cells, domain=erosion(Window(cells), 0.1)) #' degenerate case X <- cells[nndist(cells) > bdist.points(cells)] f <- nnorient(X) #' nnclean A <- nnclean(shapley, k=17, edge.correct=TRUE) B <- nnclean(runifpoint3(300), 3) #' stienen set #' bug when disc radius is zero Y <- unmark(humberside)[40:100] # contains duplicated points stienen(Y) Z <- stienenSet(Y) #' other cases U <- stienen(cells[1]) V <- stienenSet(cells, edge=FALSE) } }) spatstat.explore/MD50000644000176200001440000005250314737455262014126 0ustar liggesusersd8caad495046a479387999e91c9bd57b *DESCRIPTION f1ffad61656618b9e763c7f3a9e6351e *NAMESPACE 5f268f7b439d52650540d1f424cf240b *NEWS d6e3392a80a6e943f6926bab3ce68711 *R/FGmultiInhom.R f85ebd958263c02958f6bc1339b3509a *R/Fest.R 6a6553b98f4929fbabbc24fbdfa7c5ce *R/First.R ce10683c67bf0fadc837c277f508084d *R/GJfox.R 1d7bb030834c482936de898c037b3b14 *R/Gest.R e696907a4a5c4a8c488686e945bd4ee1 *R/Gmulti.R 329b5c34b765bf1c99b3a9c9935d574d *R/Hest.R a50201e1887e8d1ee49bd00d99f7f870 *R/Iest.R 745dd7a270cdc9ed95941ad671c1f5ea *R/Jest.R df71fe989f8d5f7bdd964277df75d89b *R/Jinhom.R aace997bc1d13408e5c32d1059d3ae1e *R/Jmulti.R effbe033106f56c80569472862d01899 *R/Jmulti.inhom.R 5f0eeea1b18a8208cd9f395d20cdfbb6 *R/Kest.R b6ae0647bea5b487a838ae538d9a9a1d *R/Kinhom.R 3b378a3234d3a797f2c15061cd43d642 *R/Kmeasure.R 8f60f7997642317e4520f7ff05c9f3d3 *R/Kmulti.R 1f724eb8f0895d3fa6d62c0a1a6fd245 *R/Kmulti.inhom.R fedb8a7b7896c8e1df0668ea85e12d6d *R/Kscaled.R e8e3ff8296e6bea222f808a02040cdd9 *R/Ksector.R 2a15c3c23318b974e7d3ab82d983d059 *R/Math.fasp.R cf4fb5212185e34ea627c33b31f234b7 *R/Math.fv.R 976a2fe4afc372b8fd25562df4aa7b1a *R/SmoothHeat.R 15200464f18f33ee9e035c9121666bb4 *R/Tstat.R ef20ad4c0a1d98a59f05de0fbd37b61c *R/adaptive.density.R 948e229cfe013701e32cbf3690856c2d *R/allstats.R c2667393272d1b082100c877490f5eb0 *R/alltypes.R 3c2542d3e2ef28fcc16692ee0e77a472 *R/auc.R 9950ab3fc5046b5ea0da4fff17755d93 *R/bermantest.R edde9119c366c2e54fc6ecbf212d89cc *R/blur.R 261e08ae0b54b871aed89d4f7b731e9a *R/blurHeat.R 2d2ac65bcd31f0eb1b1ff987ade0453e *R/boyce.R 796a6ca72e1f199c2517c261a96e1ad2 *R/bw.CvL.R d80f4794b63f364c47783fa8ed4e6540 *R/bw.CvL.adaptive.R 257ba38586dc7288bc2422ef471e4946 *R/bw.CvLHeat.R 6b5f8d9a7e29bd6e0efff9065acf2851 *R/bw.abram.ppp.R e8884b3c1f820a6193f181c57933bc5d *R/bw.diggle.R 15bb45e5bff910b091d22a085ef52bb1 *R/bw.frac.R 0512869a1aa73fbfe25a72a61354bbdd *R/bw.optim.R 197016f1b661ee073991a88e0b3a7afd *R/bw.pcf.R a9b538133c5d3c0f6a193ba670f1ff95 *R/bw.ppl.R c13e3ad08d78e1f397e769bd13bb0393 *R/bw.pplHeat.R 7458e40ffda1e5eda31f1fcf78548f17 *R/bw.scott.R 58d72644384ccac851eedd25fb3ffd74 *R/cdftest.R 79f292bb08d8c15b5ff412d11f35e377 *R/circdensity.R 5db35a42dc935cc181e548e76a88da21 *R/clarkevans.R 2d42a4f493cee5afb4be080a0b8c54e1 *R/clusterset.R aed260eceb31a62a0983c9df71e254fc *R/compileCDF.R c8cdb3cc1e843a19657ad09da27f0922 *R/compileK.R bb22386cb65bd41b9c346005f7cd04c3 *R/dclftest.R e9a5b4ccfe56f19269bea27d74c1dfdb *R/density.ppp.R d95e6940ff6258421be43ebd611f6d15 *R/density.psp.R 458c5b7190433cf6e4f4fc8035cea9ac 
*R/densityAdaptiveKernel.ppp.R fa8ff7ad73f23ddf425a3ea5e3610974 *R/densityHeat.ppp.R 207eddd48a97367da427f16533d4ce35 *R/densityVoronoi.R eecdb7361ea7ea1ac8d63c1e387caef0 *R/densityfun.R 6706d26fc720c429660657026da0ccac *R/derivfv.R 39c087904ad36d5b19944fc78aa52e61 *R/digestCovariates.R 59e2a22c3418b16cd27b5597b042889a *R/distcdf.R fb70835d414406e402b5f41d2ac2f133 *R/edgeRipley.R 5939d7e2f396dcb76452b322569826d4 *R/edgeTrans.R 9db9e91f5ece8a2c7e70712203255c91 *R/envelope.R d1f86ebbdcbbdd54f8d83d7537d4fb7e *R/envelope3.R dec1b6c9c0823ba09652beabbf488c0f *R/envelopeArray.R df17844c24e8a80ca08c3bba7d856eda *R/eval.fasp.R af1df090c755750894e33a851e7c88f7 *R/eval.fv.R 8ef18a6d5347c6db36f595c177d80ade *R/evaluatecovariates.R 2b4d4a5b0b39224a76db5dfbccd06d74 *R/evidence.R 2796dda6686956b0a068eb6b954988ec *R/exactppm.R 72ed697e696dc80bbdbe56c0713e2231 *R/fasp.R 6911f0734fc4b1da10ce6ffdc96208e3 *R/fgk3.R 6449afdbaf740b8e816798cb0c263bff *R/fryplot.R e4b033825b932f466bdcdb67f4ec7b48 *R/fv.R 7576337da12b9579db9130c9e609822f *R/hasenvelope.R 9b80ec5d17584d7d261d80fc2f0dacfb *R/hopskel.R 9e92c403cd74fa47cb4237f76136ef32 *R/hotbox.R c332c0060f13e0da0ea9f2540e8fe868 *R/idw.R 86186ef14fc5dd88e19883b5d3a710a7 *R/kernel2d.R 5952275b19e85c2d1c2245a6514ee0d5 *R/laslett.R 67c395118252398fc41b5e2c547ae19d *R/localK.R 354b7c0aed7e1c6dd902963754d18c32 *R/localKcross.R 10b8962b454b16200d6504e513e04d92 *R/localpcf.R ecb21bc73b8ee0e94ef472451c75be5a *R/lohboot.R de71aa5aa34883401e91d2a31c3c8d9b *R/markcorr.R 95cda2e43a41b86f31c6b8d68d7a62b0 *R/markmark.R 7ef34fbc1d5aeb5fb3996af3eb4ef136 *R/marktable.R 2cf10f6981f784cbc2752dbaed7b3c57 *R/metriccontact.R 37b2bde043a9b1540160c76895ac67ac *R/morisita.R 756e005fb50a6e5636a8e695f2ddc6ee *R/nnclean.R aba4829868abf5130b72f8f5eaee0da8 *R/nncorr.R 002a93f47df31f5cc259f4ad002fadb8 *R/nndensity.R 1ddc55fe21a5f54976da16fb1c691de4 *R/nnorient.R f65cf0d0651ae12a1d0a21b6f2f9315e *R/pairorient.R 8b9fe3d8d7e8cd65ee1ed48ac996c752 *R/pairs.im.R 67eea266c67b23a37ed07821b322fc82 *R/pcf.R 91d716f5f5ba8caea0095950d8386454 *R/pcfFromK.R c33922aa79d3407c97cf3d2900f5eb2d *R/pcfinhom.R e2440baf5a3b3e8c712b91d79b0f5835 *R/pcfmulti.R b553f8c0c14094cce2d0592031b9dd4b *R/pcfmulti.inhom.R e4224ae7cd5e24cc0df0576490ca1e6e *R/plot.fasp.R 404bbd542510bcadba309fc8b94ad72e *R/plot.fv.R d07d14f531a7de44f9d3c0a24e9efc0a *R/pool.R d06c9bdab82a9d64ccabe3154e91bcb9 *R/ppqq.R 4b2b057c9981cba373f0348346d42c92 *R/progressplots.R 63b86fe8a7dd85b6e12aa57d18db819b *R/quadrattest.R 3f87aec78c984221b6c69e10f4a488ea *R/radcum.R 0f733c947fd26f920f15f62d7b78f3ae *R/rat.R 755de318938aabe55b5da438015b02e2 *R/relrisk.R 233fc7cb44c966736edd58bee84e1670 *R/relriskHeat.R 292ad0ad6710c91248962adec12d3a61 *R/resolve.lambda.R e9e1486502e4d6dc502b69bdc59e4ebd *R/rho2hat.R b60fe677fcc1db9c521eb54fc0b10a79 *R/rhohat.R 120ceb4ce9bb0d6104028f1bc3f38d84 *R/rose.R bc2341bd181171d63a0df581954e08fd *R/rotmean.R fed45946a2a3afc131d4d46e70ae6084 *R/scanstat.R a6eaba9f8161fa3ed032ca1f572a37f5 *R/scriptUtils.R f91342c5f456f5533b80836918f60120 *R/sdr.R 2502c4f4e71342740ae4b1820460167e *R/segtest.R 048ef0497b1b7b5b715a35c0d88bd4f9 *R/sharpen.R c0e7adf01137747788fad043b581c8e7 *R/sigtrace.R 140bcba3e7b1618af06326c0f0d96f0f *R/smooth.ppp.R e5f576f6c283080131987b5c05a58794 *R/smoothfun.R 8f634e368fec6ab9183225c1ed0bd852 *R/smoothfv.R 562a5f049aa85ee2a7c08e55d4d4c867 *R/spatcov.R b25c0380a5f1ffcca2862a0186e80990 *R/spatialQuantile.R 59b0d54cac586ae489c14035a694d109 *R/spatialcdf.R 460b2622b66a61a8a709f0188504b7d2 *R/ssf.R 
59c7fff4ecd9e618764ba05c7dc838dc *R/stienen.R 1fee3bd0c8443e0d43dce178c4ec70d6 *R/studpermutest.R b16dfa38099fa525f0c611bfb8e549c9 *R/thresholding.R 5adc6ed40d9b4ea1b8119ed5bc620171 *R/transect.R 11fb0a6f8eabdd98f5edb09252312572 *R/twostage.R f69e6cd021b03289f2d962c648702622 *R/varblock.R 95abec834b4d080fa0b611add9bac836 *R/wtdclosepair.R f2d49fc8f50fef7f76bcb3fc7f34efb4 *inst/CITATION 6dcf0b8756121227edefe758124fb898 *inst/doc/packagesizes.txt 6dcf0b8756121227edefe758124fb898 *inst/info/packagesizes.txt 0092397ec1b97793f1a26f880f3b979f *man/Emark.Rd 5e53668c16bf9ab608fe89f038029027 *man/Extract.fasp.Rd e50f40eb875c124095941fc99a486f36 *man/Extract.fv.Rd 279ecfbecb82ff618333037b97bb953b *man/Extract.ssf.Rd b89ccc24a175c20e90279d776026267c *man/F3est.Rd 23ed3364c64b435cedd4458a912f1516 *man/Fest.Rd 051bdfab231aeab9a0581a32ceab4ea0 *man/Finhom.Rd 656a5eb7c331a31d720eddfe16ad6948 *man/FmultiInhom.Rd 5573588fa02f0c84003342ac749b1e74 *man/G3est.Rd bd79daebe1a291a7f1ed12a4fb22ef3f *man/Gcross.Rd c919ec7e4a042bf64af0825a8fc7fc90 *man/Gcross.inhom.Rd c01c15bf39bb0d1175f4cc4cfab326f6 *man/Gdot.Rd 47d217cf853732d00e3f0850c40ff120 *man/Gdot.inhom.Rd fdba5deb6f1830f0b94ba06b774e6fd6 *man/Gest.Rd 9b9be7d55f68df7aa72faf033164934a *man/Gfox.Rd eab511143ddd5f83fe9f9e9cc511bf01 *man/Ginhom.Rd 258357779c4928f84df4fb60c3d7eb36 *man/Gmulti.Rd 9ed4b8d8846ad54a502b84c21a3e38d9 *man/GmultiInhom.Rd 64ce9e7e1f918376d1f33890ec8435e1 *man/Hest.Rd c395612c6a62d893b3ebc19c6fea9fea *man/Iest.Rd ff3535bd7ecfb4b31d4fd2c7e4be7f9f *man/Jcross.Rd 11d4bcf4060f2d937aad79260e0d0b47 *man/Jcross.inhom.Rd ee7a8dfd4a83c192e259213700c3eeb7 *man/Jdot.Rd abbebfcd97a9df3bb0315ab2209d8697 *man/Jdot.inhom.Rd 8a5402bea292abb18c3fb73526fbbbe1 *man/Jest.Rd c5b906a5f9e442f9dcb7c513d8caf01f *man/Jinhom.Rd 7380c2c6004c6c707e83352f06d185d0 *man/Jmulti.Rd a4d0f0cf90b8d5fdc910fbe7a8f80ec1 *man/Jmulti.inhom.Rd ecd2fc0f300c025c765b2500c62402d7 *man/K3est.Rd a921fd5013d3ee19502e1442851fa6d2 *man/Kcross.Rd e57f3611ae8dedb92ca68bb34f630180 *man/Kcross.inhom.Rd d14c3b3f95ef79d839a3f924fd068cdc *man/Kdot.Rd 58e88d21f7837f0d25b86e98ef70e8b5 *man/Kdot.inhom.Rd 919e01f8a1733c7d26bafb4d15566d5b *man/Kest.Rd d9f91768b8a129feb9c122cd73a53645 *man/Kest.fft.Rd d311967c9ddbe48add8b4870eee73efc *man/Kinhom.Rd a44de4507b3aa5f2836a4166945842e7 *man/Kmark.Rd 9dab7fe551ec9ad125a7c8e68c4e6d9f *man/Kmeasure.Rd ae117df4c50ce246090e49082603cb8c *man/Kmulti.Rd 80ec9dc59ed7ac00b3bbc95780a5a0c8 *man/Kmulti.inhom.Rd b652df32aaa4843592c7e97893e5a793 *man/Kscaled.Rd e331b538164d2b803bd03ba71fbf55b1 *man/Ksector.Rd 174ec8f6df2d4f19c374f0278b9a04a0 *man/Lcross.Rd 915d596a67130206521163778f57e9a6 *man/Lcross.inhom.Rd bc5e6ba924f67ff7db507aacaf6890a3 *man/Ldot.Rd 194cf3a2af7edc846b534bf347a91407 *man/Ldot.inhom.Rd e4a91282dbfd772c23800fc25943b962 *man/Lest.Rd 0c881772e4c160aa2e775406e9899ff3 *man/Linhom.Rd c691cce75500e8b0645bc684fc279455 *man/Math.fasp.Rd c6bea72f0a3afd9e60f3aac9d0b41048 *man/Math.fv.Rd 3856350ef8ce867f1b9fa855082b74f4 *man/PPversion.Rd ce2f8104116810b2735dd24e878e5f01 *man/Smooth.Rd e70fc1f8281705102f2c381c23f125fb *man/Smooth.fv.Rd c3f095e440dfecd3abc849a6c403b61a *man/Smooth.ppp.Rd e5ad9be9eac0f92b2ebbdc02f558086d *man/Smooth.ssf.Rd 06015a55b03f35dec500be3797d4c14d *man/SmoothHeat.Rd 774d0d701731ff025cb79bc5ae817004 *man/SmoothHeat.ppp.Rd 33beae68594bd5fa119340460b502efe *man/Smoothfun.ppp.Rd 7a49a1bc238ce23cefc2f9b546bded5a *man/SpatialMedian.ppp.Rd eb354d4f81ce0f7acbc789907e219fba *man/SpatialQuantile.Rd 272e27688383202ad7781a69ec312020 
*man/SpatialQuantile.ppp.Rd fdc54451c086b24181192d82ed04a7e7 *man/Tstat.Rd ab3ab28866f2433cdd7b53342a581f2f *man/Window.quadrattest.Rd ea707ea02f11451b38eccb6e38731fab *man/adaptive.density.Rd 5233ba92c770de0dc848e77df4491c4c *man/allstats.Rd e4781ff839302ee8034ed9380e037b6f *man/alltypes.Rd d8cbf00e237000a80b5cedea135fa5fb *man/as.data.frame.envelope.Rd d740d6e1282248fc9131d68a8d235d20 *man/as.function.fv.Rd 08d940dc470bb44f7e7cfff90770cd1e *man/as.function.rhohat.Rd 04aae9f797fc245131cd2b1e565c3817 *man/as.fv.Rd b45c90e759c8870a603615672a9b5429 *man/as.owin.quadrattest.Rd 41c61f04f7b163496bd1b6b17689f690 *man/as.tess.Rd d6119f0f62f690a2cb1cfebb568ea798 *man/auc.Rd 0c64d2c1c4449c3b1d95614df96801c7 *man/berman.test.Rd d2c274a96d29ca5cdc82af047acbbbab *man/bind.fv.Rd 8ba1641bd79b00e57f0b9182f8b72d45 *man/bits.envelope.Rd 4918f6fd9c059fb7df753f8e3e2b6937 *man/bits.test.Rd 802793bbaef1a44f2cb1f2a793e379a0 *man/blur.Rd 650dc52e263179cfa99933ef1b7282e7 *man/blurHeat.Rd 4a56d9414ae0aaf1738665e2b9241a59 *man/boyce.Rd 023f5fce9df18a86aed71c1d9548661d *man/bw.CvL.Rd 7a93545c1f8e5244742f0f0ba9f72a05 *man/bw.CvL.adaptive.Rd fd37a159cab56206b7bc3aca14fc623e *man/bw.CvLHeat.Rd fd800b8c4e6797a30afdab5874c1af51 *man/bw.abram.ppp.Rd 510bf011020bf6da204e52ecb11b6b80 *man/bw.diggle.Rd 0ae873aa45b62350fee2766299ec65d7 *man/bw.frac.Rd ee2f9ebe620221fd8dd650752b9fa4e8 *man/bw.optim.object.Rd b5cd59d3d8e956e93cc5f92880ef8d30 *man/bw.pcf.Rd ca8bc57d72f6c4722a9f2c5c2aa2f903 *man/bw.ppl.Rd 536f7ec8a45b6ffa3c08bde544e702e7 *man/bw.pplHeat.Rd 1ef4f009993757931a18aa0a4924a213 *man/bw.relrisk.Rd 100615c80b97ade0cf054a61b1843e3d *man/bw.relriskHeatppp.Rd 20b4fc928bc6de12b13281cdfbff767e *man/bw.scott.Rd 2eba62af96953eb5e04d83c489b5f7f3 *man/bw.smoothppp.Rd 41a36343163ce6de22ec851dcfa1dc73 *man/bw.stoyan.Rd d56a6cca28857aed4da07989d3ff87ca *man/cdf.test.Rd c9d72cc2b707dc4cdc8a091441c0330e *man/circdensity.Rd c4058c79a2e0060812b41aae6e21cd45 *man/clarkevans.Rd f36298d0d894f41c476c8821ae67e339 *man/clarkevans.test.Rd c7399f1f3247855779cb574b61ab5c87 *man/clusterset.Rd cf6b5cd9e51695212ab5879474032b23 *man/collapse.fv.Rd 6214ce94633896c39ef78675e3609367 *man/compatible.fasp.Rd d47029d8b00862359878aafcf21ef095 *man/compatible.fv.Rd 4bba2435cc08f5f6927399c8992b2629 *man/compileCDF.Rd c14df44204cf0e512e1d26691c84ee08 *man/compileK.Rd d42866e7843fe04827c1f8f74c96e9ae *man/cov.im.Rd 5a937a4d7cd2da5b2babdd2066816ae6 *man/dclf.progress.Rd 1c767fe98a56feb6e6fd742725f6b5b8 *man/dclf.sigtrace.Rd a20d616a5b647aa1947ed8f9ccb5cf9f *man/dclf.test.Rd 2d120c784c96a580dbd0e3dc596f63bf *man/density.ppp.Rd e0b77a1d307a9b0fff28f31af0e0f326 *man/density.psp.Rd 2ebf825bde4517f348e9f56fe81a14f1 *man/density.splitppp.Rd 7d4101938027d6046cc4b5789bed3fa0 *man/densityAdaptiveKernel.ppp.Rd cf58bc36a83a6fc88c9ab36a926d5a2d *man/densityAdaptiveKernel.splitppp.Rd fb3da5ac3c01ee30ed13b66a6a31a0e0 *man/densityHeat.Rd 803783ad5d1ab2d40f35b82283aeb986 *man/densityHeat.ppp.Rd a5ab03840fdac0dcd41ee621d861b6b3 *man/densityVoronoi.Rd 6ac06c3aa13ac7efea196456aba7536e *man/densityfun.Rd 50fca06c24aac752c750d95c8f56f7f6 *man/deriv.fv.Rd c1024ff575bfeca4fca110a9ac214b7e *man/dg.envelope.Rd 17b90a47a1baecd1229606b33fd6bec0 *man/dg.progress.Rd 882680d334a68e8876b769abff1b721d *man/dg.sigtrace.Rd ebd851568798f893587a8667b989395d *man/dg.test.Rd b72e48220d7edbac9fc1686c28abd50f *man/dimhat.Rd bdd4586f260a366e3b00e5c5c62a820c *man/distcdf.Rd 9ef2454fbb5f430bf111f10c9db9dee2 *man/domain.quadrattest.Rd 623a912c3f9f26c4e122c3c3932a5b48 *man/edge.Ripley.Rd 
abab7d87f28cfd003e2fd333d087af0d *man/edge.Trans.Rd 88fc1d53cb80ec7584b7975c3180f158 *man/envelope.Rd 755161786a5f1aa3a49f182f8412c76c *man/envelope.envelope.Rd 65b8f88b766c33536dafeb9aeab2d6ac *man/envelope.pp3.Rd a45c4f951da227119a5ebf01cd4e865e *man/envelopeArray.Rd 5136dda21c4ff534fa9324110ec9ca46 *man/eval.fasp.Rd 72165f85587b5e894737dd919cf1df56 *man/eval.fv.Rd ff54d7f168c0a68983cdfbc5581ef6fd *man/fasp.object.Rd c751fce47d2bb11ac2c8181492054851 *man/formula.fv.Rd a0c9bf29bf1f9047e8675a264046b31a *man/fryplot.Rd 39d2d96bfcd7df22a5068f2399afc888 *man/fv.Rd 483c4f60ab88ee608a5e6bc000b055c4 *man/fv.object.Rd cdf23623d05c9cfabf2c13a29496b728 *man/fvnames.Rd 79bd08cc2961045c6fb96945627659d7 *man/harmonise.fv.Rd 6c3e640f273e14fa82985c87142175a0 *man/hopskel.Rd cac9313430d76765a4b2a5ffba9307ab *man/hotbox.Rd f662164926e3e016e01b3381931a692b *man/idw.Rd 15c74c0a3626c007f606b73c46c658a0 *man/increment.fv.Rd 6ddd62c3ae8d6f28db7e886018d877e7 *man/integral.fv.Rd a446571e23fa104dd7c099a88c5ab653 *man/laslett.Rd 88b8ada8c4c6fb28a269badf97a900b4 *man/localK.Rd 0cf23cfc1764077bc3b058c0c6c570c7 *man/localKcross.Rd 054c22ece16c59ba1bd6ca5f29b685a7 *man/localKcross.inhom.Rd 75063d93f0bc38698d5295141f4f37c6 *man/localKdot.Rd 0176ad13dccf9db77f701a2cd73a7477 *man/localKinhom.Rd 93bab536914b16f05aacee78271f9d28 *man/localpcf.Rd b4f5517fb2b539701a5a295054d849c5 *man/lohboot.Rd 1ac6f0faf463fc5c6d189b778ea36fc3 *man/macros/defns.Rd b7d0391259441924d6c90d01a12b7caa *man/markconnect.Rd c5e4fb884019d4594dbf6130aa8ef4ea *man/markcorr.Rd 2f52d0534b1f3df0003a97d1b44108a4 *man/markcrosscorr.Rd 1dc6508ccffd7eda3079893ce6500c4f *man/markmarkscatter.Rd ab666571e6e20c917ee978ed81095e4b *man/marktable.Rd b816aba5e1b538017a04ea70ba9e08bd *man/markvario.Rd 4af538fd19d3df6cbf3e5594fef186c0 *man/methods.rho2hat.Rd 2e019ab44ef8a88988f4f08cdadbf024 *man/methods.rhohat.Rd 8970632c01f234c4317728ea628f09a9 *man/methods.ssf.Rd 552fe639937d214f2c9d4d95689ad345 *man/miplot.Rd 9cbf7bf9b92a29293c583e374279789a *man/nnclean.Rd 1465bca345f5529f65e72998d86f9a3a *man/nncorr.Rd 78eccc68a8cf78a8a90c1de5cd6eed3b *man/nndensity.Rd 418a896aa7d1c53313e7022184ea350a *man/nnorient.Rd 6de1938328ef83158520075d14c3d95b *man/pairMean.Rd ae6a17d8b47bc103cfc21d5ccb2f9fb2 *man/pairorient.Rd 7153bebaa1e2c4210c9a6eb7720ad83a *man/pairs.im.Rd 09ab7311c0b0fffaa524bd31aff5a40f *man/panel.contour.Rd 9b06494a831f88b18f8196c687770fa4 *man/pcf.Rd dfadd4ff1016669603491af6a4362311 *man/pcf.fasp.Rd aeb5cdc38dbcd2acefb53b9b192eb5a5 *man/pcf.fv.Rd c8419a6a2384c77bbc923f18bef6fa87 *man/pcf.ppp.Rd c0d52b0e70c0d75e5e53e3197512ca58 *man/pcf3est.Rd eaa6648e4fa2dbfb07c1bea7411a68cf *man/pcfcross.Rd 2524b6e1b90a7fa9a975cc68b2137667 *man/pcfcross.inhom.Rd 9886654dedf277cf0bd8a251e5dc5672 *man/pcfdot.Rd 26c741af6379f2faffa2247371ca4bc6 *man/pcfdot.inhom.Rd 031333461f1ec6f590d2198f2abb6b45 *man/pcfinhom.Rd 2ee79b17354f561e8fc6157eab347903 *man/pcfmulti.Rd 5c77a4c18c3083bdf4b64eee215d8d3b *man/plot.bermantest.Rd bbeb607bf1e38024b846c54e2544e5b7 *man/plot.cdftest.Rd 12ae0af2c6163ac7738d0cb4bc24750a *man/plot.envelope.Rd 2d140b816c265e69b313815b69bca723 *man/plot.fasp.Rd 3f259046cb2ac866168aa6aee80ccdfa *man/plot.fv.Rd 618e4470eee49f802b3eac51b0a7a9bb *man/plot.laslett.Rd 66cdc1066c5a1bf10e33890b3e88aa99 *man/plot.quadrattest.Rd 28fdf8f7e4af167fab6dcf0ee35feb90 *man/plot.scan.test.Rd dd69d4d54bbdf03a76a85a3c74fac1da *man/plot.ssf.Rd 99f2edb1b8e825388fe0978f74a78155 *man/plot.studpermutest.Rd 1e4ffe51385b95fa44f17d5ebbc1e023 *man/pool.Rd 739e58aba3c619b4b63197aa35c0f779 
*man/pool.anylist.Rd 51bb1d87df20a4d6d4f715df8fb12dac *man/pool.envelope.Rd 3607cd8feda7d4ea77bc933e0f3ee78e *man/pool.fasp.Rd a0059ae9ec4770fc7f7760eb11e28344 *man/pool.fv.Rd 2f6e7426a7b09aae4756b51c01f041a0 *man/pool.quadrattest.Rd 46d143e957a755ef88cf33cac69ace4f *man/pool.rat.Rd 45f080f1b075cd34b56df4cc5a3c4d8f *man/quadrat.test.Rd 36d67ee9dd43907fc635301f095abf9e *man/quadrat.test.splitppp.Rd 018a2b681041c642ba087e77cf437669 *man/radcumint.Rd d9273900fb3ff4f7a5f54d4b162d7345 *man/rat.Rd 211bf373b5eb03462397d529b085f67a *man/rectcontact.Rd 4be08e9b6452b466095d7fed03d83af3 *man/reload.or.compute.Rd 4e5b944e97f48008b6f7ec11c9ae5cc1 *man/relrisk.Rd f31a90a8d607012c58b4b0211ae3949b *man/relrisk.ppp.Rd 9ad712cbfbdbb352c537f6df039428d4 *man/relriskHeat.Rd d78b71b1a3a36882c262276bfb783c87 *man/rho2hat.Rd 1fd10961c689d3f2d51b4661d2d5cfef *man/rhohat.Rd 8d5adcf98a67cefc5a4e5be3ae960de8 *man/roc.Rd b062a825c0b76bc5292d540ff065b8bf *man/rose.Rd 4709f8220a6d9b0380b3d9a4ed8eee75 *man/rotmean.Rd c85ea6c89322a71c3c8671f0bfe4603b *man/scan.test.Rd 8c009fb2f87335f39b31c93ab1ac6f7c *man/scanLRTS.Rd 7cfcd24d528d6a4427766e3a6a5c2ce0 *man/sdr.Rd 20d7ec0572c3f2faa5b0916ae5f5398b *man/sdrPredict.Rd 187a1d2166fbbe289b4fe889f0ec7e00 *man/segregation.test.Rd 20a6936c6e2e0ab4267abc5205ccf843 *man/sharpen.Rd 0a905db151e2aa74c4c7799117673407 *man/spatcov.Rd 53c11c5c15e762aa32ff8ca3cc792cfa *man/spatialcdf.Rd 228b320547756b45610d6b8ef91eb802 *man/spatstat.explore-deprecated.Rd 057f6a473a7a8d8f7f4b5cf628cd923d *man/spatstat.explore-internal.Rd 96ae23064df2408c010d8718a94ffaf6 *man/spatstat.explore-package.Rd 5469878df5bd09c57fdbb517b17310e3 *man/ssf.Rd 5017da60c51db6c67b0788951efb3199 *man/stienen.Rd 42a8c76679cc6c23c520b84884725fe0 *man/studpermu.test.Rd f1c7accea61aea9da256092d74cce2aa *man/subspaceDistance.Rd dc6c5989ca0fe33ae6c39dd306ec3afb *man/thresholdCI.Rd 35744b158f8949160537f1025891f13e *man/thresholdSelect.Rd 54da4d0c1d75fd084eed79305fb0f61f *man/transect.im.Rd e6265445dde23155d90f9d7efc27ed78 *man/varblock.Rd f355ed038f7bcb6a4426427c9814afe4 *man/with.fv.Rd d02099194c00a636349b66309fed886e *man/with.ssf.Rd 606313254e3a3982dae8272420c16383 *src/Kborder.c 328e8a137a818344154247b002b61ebf *src/Kborder.h c6533abef1f8acb43ed3bff9a044b931 *src/Knone.c d2c35a9fcefe6c44f21f9547b4d31a0e *src/Knone.h c1937ccea286609c2306f1e373ba4ea8 *src/Krect.c 707949facbb1443a42c08d275604ce41 *src/KrectBody.h cc107241adf0fc298d769ccd0a5bb71a *src/KrectFunDec.h 3bfd60e497ecda68c4a9fd7d6bc26c7a *src/KrectIncrem.h 08a4f417aed6959d94da3abc8bc55d0f *src/KrectV1.h 92268e0af4222764daf4b106a12bbadc *src/KrectV2.h 263f2296e12aee28f73dff92cef5dd61 *src/KrectV3.h 4ab4852a66f7a56b52e999f326f179c2 *src/KrectV4.h 943164f9e93bdf53e1aef8edf1545767 *src/call3d.c 542e6e474340e0aac3ed97f6a92777b4 *src/chunkloop.h becea4a1ea42b8a39021c01abdc6459d *src/constants.h dd3ee2079e7446f90a4b8d18823ffcc0 *src/corrections.c dcf500827ff4c8b060819be2b274a7d7 *src/crossloop.h 6ace5ac8aee9f2636ff86614069e6b26 *src/denspt.c 6b39d47430ef422de440654b6d66bf28 *src/densptcross.c 54362d59e7209bc05d1916552974b16a *src/digber.c 9c0279a43230a9c445152824883db841 *src/f3.c 0ba81075007b6ab741f3eda829da8e99 *src/functable.h 5b327011d47e05a75de96032fc758a87 *src/g3.c 3280a084e3cdcb931801c42fcd111d2e *src/geom3.h 583091c4db28b4b4946b33fcfbde1f70 *src/idw.c 51a8b99777daa410426efe5eba2c0fcb *src/init.c 6f352f32aade21633cad46287eec90aa *src/k3.c e3c36533e55e56f4d33e9d91111d0428 *src/localpcf.c 12391a44a90ca37659eb4bfb9b2537ec *src/localpcf.h 767ddd93b5f66825c1ed17333a89be1d 
*src/loccum.c 4e29092470baee76e155d2fae3c9d34b *src/loccums.h 25dcd1e2da6967e029da0a8b5119c782 *src/loccumx.h d4f690790bb0e2585fd2a2645e0556d2 *src/looptest.h acdf88b1cfedbbb87d81bb727761decd *src/pairloop.h fcfed5f15f9e77b3510d0ce3656af745 *src/pcf3.c 9651b249e4b459e45aa5c5ce5ba3f34a *src/proto.h 615b05e6d7796156017ceac8f9589df0 *src/raster.c 668e8318237175ac56af1fcfdb41be29 *src/raster.h b525f1b5e6754c376bc5107565b3dbcb *src/ripleybox.h 215efc7557c41b941e3ac720a0277988 *src/ripleypoly.h 64ceb36a9d82f91028f6e97815bd7e98 *src/scan.c 09234550b699f0cc2bc71066519869b5 *src/segdens.c 1091ee379187f845ec9ab976bf1e8009 *src/sphefrac.c 08126353fd72c69221a479f3abb19a38 *src/sphevol.c 5c127a9d5ddeaee8cc8f34b32218a3a5 *src/yesno.h 39c4b4cba525955d7a920f85e3124fe8 *tests/funky.tab b51217edee1da528ea36a1b80ad13aff *tests/testsAtoC.R ee122216ab3bbda2a7fd04e9173c6ec8 *tests/testsD.R d2b871272db19837971257b6750f1736 *tests/testsEtoF.R 8b7ee3178b922bcae4d967bf2fa45b57 *tests/testsGtoJ.R 54ee46904a9199161c01d9e000ef9146 *tests/testsK.R aa5ffbe20b3d506eceb085928b774d72 *tests/testsL.R 6b33418edafbfd534a28bc326c976706 *tests/testsM.R c40906f77d588b02bd434a35cab2d099 *tests/testsNtoO.R a2ea1be87ba32253d6105fa3481e288d *tests/testsP1.R 845648d0be652708b5476c4384fdef65 *tests/testsP2.R 845648d0be652708b5476c4384fdef65 *tests/testsQ.R 29e511f0b9f600e4c41adc247f066ed2 *tests/testsR1.R 845648d0be652708b5476c4384fdef65 *tests/testsR2.R 0559846d5336e081deea883e8ab83315 *tests/testsS.R e701aa07cb9e01f2555990012d8d0c8c *tests/testsT.R 845648d0be652708b5476c4384fdef65 *tests/testsUtoZ.R spatstat.explore/R/0000755000176200001440000000000014700374620013776 5ustar liggesusersspatstat.explore/R/Kmulti.inhom.R0000644000176200001440000002661414611073307016507 0ustar liggesusers# # Kmulti.inhom.S # # $Revision: 1.56 $ $Date: 2023/02/28 02:06:17 $ # # # ------------------------------------------------------------------------ Lcross.inhom <- function(X, i, j, ..., correction) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- levels(marks(X))[1] if(missing(j)) j <- levels(marks(X))[2] if(missing(correction)) correction <- NULL K <- Kcross.inhom(X, i, j, ..., correction=correction) L <- eval.fv(sqrt(pmax.int(K,0)/pi)) iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) # relabel the fv object L <- rebadge.fv(L, substitute(L[inhom,i,j](r), list(i=iname,j=jname)), c("L", paste0("list", paren(paste("inhom", i, j, sep=",")))), new.yexp=substitute(L[list(inhom,i,j)](r), list(i=iname,j=jname))) attr(L, "labl") <- attr(K, "labl") attr(L, "dangerous") <- attr(K, "dangerous") return(L) } Ldot.inhom <- function(X, i, ..., correction) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- levels(marks(X))[1] if(missing(correction)) correction <- NULL K <- Kdot.inhom(X, i, ..., correction=correction) L <- eval.fv(sqrt(pmax.int(K,0)/pi)) # relabel the fv object iname <- make.parseable(paste(i)) L <- rebadge.fv(L, substitute(L[inhom, i ~ dot](r), list(i=iname)), c("L", paste0("list(inhom,", iname, "~symbol(\"\\267\"))")), new.yexp=substitute(L[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) attr(L, "labl") <- attr(K, "labl") attr(L, "dangerous") <- attr(K, "dangerous") return(L) } "Kcross.inhom" <- function(X, i, j, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIJ=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) { verifyclass(X, "ppp") 
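  ## The pattern X must be multitype; the type arguments i and j default to
  ## the first and second levels of the marks respectively (see below).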
if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL miss.update <- missing(update) miss.leave <- missing(leaveoneout) marx <- marks(X) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] I <- (marx == i) J <- (marx == j) Iname <- paste("points with mark i =", i) Jname <- paste("points with mark j =", j) K <- Kmulti.inhom(X, I, J, lambdaI, lambdaJ, ..., r=r,breaks=breaks,correction=correction, sigma=sigma, varcov=varcov, lambdaIJ=lambdaIJ, Iname=Iname, Jname=Jname, lambdaX=lambdaX, update=update, leaveoneout=leaveoneout, miss.update=miss.update, miss.leave=miss.leave) iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(K, substitute(K[inhom,i,j](r), list(i=iname,j=jname)), c("K", paste0("list", paren(paste("inhom", i, j, sep=",")))), new.yexp=substitute(K[list(inhom,i,j)](r), list(i=iname,j=jname))) attr(result, "dangerous") <- attr(K, "dangerous") return(result) } "Kdot.inhom" <- function(X, i, lambdaI=NULL, lambdadot=NULL, ..., r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIdot=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL miss.update <- missing(update) miss.leave <- missing(leaveoneout) marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. all points Iname <- paste("points with mark i =", i) Jname <- paste("points") K <- Kmulti.inhom(X, I, J, lambdaI, lambdadot, ..., r=r,breaks=breaks,correction=correction, sigma=sigma, varcov=varcov, lambdaIJ=lambdaIdot, Iname=Iname, Jname=Jname, lambdaX=lambdaX, update=update, leaveoneout=leaveoneout, miss.update=miss.update, miss.leave=miss.leave) iname <- make.parseable(paste(i)) result <- rebadge.fv(K, substitute(K[inhom, i ~ dot](r), list(i=iname)), c("K", paste0("list(inhom,", iname, "~symbol(\"\\267\"))")), new.yexp=substitute(K[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) if(!is.null(dang <- attr(K, "dangerous"))) { dang[dang == "lambdaJ"] <- "lambdadot" dang[dang == "lambdaIJ"] <- "lambdaIdot" attr(result, "dangerous") <- dang } return(result) } "Kmulti.inhom"<- function(X, I, J, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), lambdaIJ=NULL, sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) { verifyclass(X, "ppp") dflt <- list(Iname="points satisfying condition I", Jname="points satisfying condition J", miss.update=missing(update), miss.leave=missing(leaveoneout)) extrargs <- resolve.defaults(list(...), dflt) if(length(extrargs) > length(dflt)) warning("Additional arguments unrecognised") Iname <- extrargs$Iname Jname <- extrargs$Jname miss.update <- extrargs$miss.update miss.leave <- extrargs$miss.leave npts <- npoints(X) W <- as.owin(X) areaW <- area(W) # validate edge correction correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) # validate I, J I <- ppsubset(X, I, 
"I") J <- ppsubset(X, J, "J") if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") XI <- X[I] XJ <- X[J] nI <- sum(I) nJ <- sum(J) if(nI == 0) stop(paste("There are no", Iname)) if(nJ == 0) stop(paste("There are no", Jname)) ## r values rmaxdefault <- rmax.rule("K", W, nJ/areaW) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max ## lambda values a <- resolve.lambdacross(X=X, I=I, J=J, lambdaI=lambdaI, lambdaJ=lambdaJ, lambdaX=lambdaX, lambdaIJ=lambdaIJ, ..., sigma=sigma, varcov=varcov, leaveoneout=leaveoneout, update=update, Iexplain=Iname, Jexplain=Jname) lambdaI <- a$lambdaI lambdaJ <- a$lambdaJ lambdaIJ <- a$lambdaIJ danger <- a$danger dangerous <- a$dangerous ## Weight for each pair if(!is.null(lambdaIJ)) { danger <- TRUE dangerous <- union(dangerous, "lambdaIJ") if(!is.matrix(lambdaIJ)) stop("lambdaIJ should be a matrix") if(nrow(lambdaIJ) != nI) stop(paste("nrow(lambdaIJ) should equal the number of", Iname)) if(ncol(lambdaIJ) != nJ) stop(paste("ncol(lambdaIJ) should equal the number of", Jname)) } # Recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame # It will be given more columns later K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") fname <- c("K", "list(inhom,I,J)") K <- fv(K, "r", quote(K[inhom, I, J](r)), "theo", , alim, c("r", makefvlabel(NULL, NULL, fname, "pois")), desc, fname=fname, yexp=quote(K[list(inhom,I,J)](r))) # identify close pairs of points close <- crosspairs(XI, XJ, max(r), what="ijd") # map (i,j) to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(I & J)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$d <- close$d[ok] } } # extract information for these pairs (relative to orderings of XI, XJ) dclose <- close$d icloseI <- close$i jcloseJ <- close$j # Form weight for each pair if(is.null(lambdaIJ)) weight <- 1/(lambdaI[icloseI] * lambdaJ[jcloseJ]) else weight <- 1/lambdaIJ[cbind(icloseI, jcloseJ)] # Compute estimates by each of the selected edge corrections. 
if(any(correction == "none")) { ## uncorrected wh <- whist(dclose, breaks$val, weight) Kun <- cumsum(wh)/areaW rmax <- diameter(W)/2 Kun[r >= rmax] <- NA K <- bind.fv(K, data.frame(un=Kun), makefvlabel(NULL, "hat", fname, "un"), "uncorrected estimate of %s", "un") } if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(XI) bI <- b[icloseI] # apply reduced sample algorithm RS <- Kwtsum(dclose, bI, weight, b, 1/lambdaI, breaks) if(any(correction == "border")) { Kb <- RS$ratio K <- bind.fv(K, data.frame(border=Kb), makefvlabel(NULL, "hat", fname, "bord"), "border-corrected estimate of %s", "border") } if(any(correction == "bord.modif")) { Kbm <- RS$numerator/eroded.areas(W, r) K <- bind.fv(K, data.frame(bord.modif=Kbm), makefvlabel(NULL, "hat", fname, "bordm"), "modified border-corrected estimate of %s", "bord.modif") } } if(any(correction == "translate")) { ## translation correction edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE) allweight <- edgewt * weight wh <- whist(dclose, breaks$val, allweight) Ktrans <- cumsum(wh)/areaW rmax <- diameter(W)/2 Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), makefvlabel(NULL, "hat", fname, "trans"), "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { ## Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1)) allweight <- edgewt * weight wh <- whist(dclose, breaks$val, allweight) Kiso <- cumsum(wh)/areaW rmax <- diameter(W)/2 Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(iso=Kiso), makefvlabel(NULL, "hat", fname, "iso"), "Ripley isotropic correction estimate of %s", "iso") } ## default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) if(danger) attr(K, "dangerous") <- dangerous return(K) } spatstat.explore/R/compileCDF.R0000644000176200001440000000701614611073310016063 0ustar liggesusers#' compileCDF.R #' #' Wrappers for estimating CDF from right-censored data #' #' Copyright (c) 1991-2023 Adrian Baddeley, Rolf Turner and Ege Rubak #' GNU Public Licence (>= 2.0) #' #' $Revision: 1.1 $ $Date: 2023/11/04 04:45:32 $ censtimeCDFest <- function(o, cc, d, breaks, ..., KM=TRUE, RS=TRUE, HAN=TRUE, RAW=TRUE, han.denom=NULL, tt=NULL, pmax=0.9, fname="CDF", fexpr=quote(CDF(r))) { # Histogram-based estimation of cumulative distribution function # of lifetimes subject to censoring. 
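  # Returns an object of class 'fv' containing the Kaplan-Meier and
  # reduced-sample (border-corrected) estimates of the cdf, optionally a
  # Hanisch-type estimate and the raw empirical cdf, and the KM hazard.
  # Arguments: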
# o: censored lifetimes min(T_i,C_i) # cc: censoring times C_i # d: censoring indicators 1(T_i <= C_i) # breaks: histogram breakpoints (vector or 'breakpts' object) # han.denom: denominator (eroded area) for each value of r # tt: uncensored lifetimes T_i, if known breaks <- as.breakpts(breaks) bval <- breaks$val rval <- breaks$r rmax <- breaks$max # Kaplan-Meier and/or Reduced Sample out <- km.rs.opt(o, cc, d, breaks, KM=KM, RS=RS) # convert to data frame out$breaks <- NULL df <- as.data.frame(out) # Raw ecdf of observed lifetimes if available if(RAW && !is.null(tt)) { h <- whist(tt[tt <= rmax], breaks=bval) df <- cbind(df, data.frame(raw=cumsum(h)/length(tt))) } # Hanisch if(HAN) { if(is.null(han.denom)) stop("Internal error: missing denominator for Hanisch estimator") if(length(han.denom) != length(rval)) stop(paste("Internal error:", "length(han.denom) =", length(han.denom), "!=", length(rval), "= length(rvals)")) # uncensored distances x <- o[d] # calculate Hanisch estimator h <- whist(x[x <= rmax], breaks=bval) H <- cumsum(h/han.denom) df <- cbind(df, data.frame(han=H/max(H[is.finite(H)]))) } # determine appropriate plotting range bestest <- if(KM) "km" else if(HAN) "han" else if(RS) "rs" else "raw" alim <- range(df$r[df[[bestest]] <= pmax]) # convert to fv object nama <- c("r", "km", "hazard", "han", "rs", "raw") avail <- c(TRUE, KM, KM, HAN, RS, RAW) iscdf <- c(FALSE, TRUE, FALSE, TRUE, TRUE, TRUE) labl <- c("r", makefvlabel(NULL, "hat", fname, "km"), "hat(lambda)(r)", makefvlabel(NULL, "hat", fname, "han"), makefvlabel(NULL, "hat", fname, "bord"), makefvlabel(NULL, "hat", fname, "raw") )[avail] desc <- c("distance argument r", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function lambda(r)", "Hanisch estimate of %s", "border corrected estimate of %s", "uncorrected estimate of %s")[avail] df <- df[, nama[avail]] Z <- fv(df, "r", fexpr, bestest, . ~ r, alim, labl, desc, fname=fname) fvnames(Z, ".") <- nama[iscdf & avail] return(Z) } # simple interface for students and code development compileCDF <- function(D, B, r, ..., han.denom=NULL, check=TRUE) { han <- !is.null(han.denom) breaks <- breakpts.from.r(r) if(check) { stopifnot(length(D) == length(B) && all(D >= 0) && all(B >= 0)) if(han) stopifnot(length(han.denom) == length(r)) } D <- as.vector(D) B <- as.vector(B) # observed (censored) lifetimes o <- pmin.int(D, B) # censoring indicators d <- (D <= B) # go result <- censtimeCDFest(o, B, d, breaks, HAN=han, han.denom=han.denom, RAW=TRUE, tt=D) result <- rebadge.fv(result, new.fname="compileCDF") } spatstat.explore/R/envelope.R0000644000176200001440000023744314611073310015744 0ustar liggesusers# # envelope.R # # computes simulation envelopes # # $Revision: 2.125 $ $Date: 2023/08/15 08:07:52 $ # envelope <- function(Y, fun, ...) { UseMethod("envelope") } # ................................................................. 
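# .................................................................
# Illustrative sketch (not part of the package code proper): typical
# top-level calls to envelope() for a point pattern. It assumes the
# 'cells' dataset from spatstat.data is available; the values of nsim
# are chosen for illustration only. Wrapped in if(FALSE) so that
# sourcing this file does not trigger the simulations.
if(FALSE) {
  ## pointwise critical envelopes of the K function under CSR
  E1 <- envelope(cells, Kest, nsim = 39, nrank = 1)
  plot(E1)
  ## global (simultaneous) envelopes of the centred L function
  E2 <- envelope(cells, Lest, nsim = 19, global = TRUE,
                 transform = expression(. - r))
  plot(E2)
  ## null model supplied by the user as an expression to be re-evaluated
  E3 <- envelope(cells, Kest, nsim = 39,
                 simulate = expression(runifpoint(npoints(cells),
                                                  Window(cells))))
}
# .................................................................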
# A 'simulation recipe' contains the following variables # # type = Type of simulation # "csr": uniform Poisson process # "rmh": simulated realisation of fitted Gibbs or Poisson model # "kppm": simulated realisation of fitted cluster model # "slrm": simulated realisation of spatial logistic regression # "expr": result of evaluating a user-supplied expression # "list": user-supplied list of point patterns # # expr = expression that is repeatedly evaluated to generate simulations # # envir = environment in which to evaluate the expression `expr' # # 'csr' = TRUE iff the model is (known to be) uniform Poisson # # pois = TRUE if model is known to be Poisson # # constraints = additional information about simulation (e.g. 'with fixed n') # # ................................................................... simulrecipe <- function(type, expr, envir, csr, pois=csr, constraints="") { if(csr && !pois) warning("Internal error: csr=TRUE but pois=FALSE") out <- list(type=type, expr=expr, envir=envir, csr=csr, pois=pois, constraints=constraints) class(out) <- "simulrecipe" out } envelope.ppp <- function(Y, fun=Kest, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- Kest envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) ismarked <- is.marked(Y) ismulti <- is.multitype(Y) fix.marks <- fix.marks && ismarked if(!is.null(simulate)) { # ................................................... # Simulations are determined by 'simulate' argument if(fix.n || fix.marks) warning("fix.n and fix.marks were ignored, because 'simulate' was given") # Processing is deferred to envelopeEngine simrecipe <- simulate # Data pattern is argument Y X <- Y } else if(!fix.n && !fix.marks) { # ................................................... # Realisations of complete spatial randomness # will be generated by rpoispp # Data pattern X is argument Y # Data pattern determines intensity of Poisson process X <- Y sY <- summary(Y, checkdup=FALSE) Yintens <- sY$intensity nY <- npoints(Y) Ywin <- Y$window Ymarx <- marks(Y) # expression that will be evaluated simexpr <- if(is.null(Ymarx)) { # unmarked point pattern expression(rpoispp(Yintens, win=Ywin)) } else if(is.null(dim(Ymarx))) { # single column of marks expression({ A <- rpoispp(Yintens, win=Ywin); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j] }) } else { # multiple columns of marks expression({ A <- rpoispp(Yintens, win=Ywin); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j, , drop=FALSE] }) } dont.complain.about(Yintens, Ywin) # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE, pois = TRUE) } else if(fix.marks) { # ................................................... 
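    ## In this branch the simulated patterns have the same number of points
    ## as Y and carry exactly the original marks (only the locations are
    ## uniformly random); in the fix.n-only branch below, the marks are
    ## resampled with replacement instead.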
# Data pattern is argument Y X <- Y # Realisations of binomial process # with fixed number of points and fixed marks # will be generated by runifpoint nY <- npoints(Y) Ywin <- as.owin(Y) Ymarx <- marks(Y) # expression that will be evaluated simexpr <- expression(runifpoint(nY, Ywin) %mark% Ymarx) # suppress warnings from code checkers dont.complain.about(nY, Ywin, Ymarx) # simulation constraints (explanatory string) constraints <- if(ismulti) "with fixed number of points of each type" else "with fixed number of points and fixed marks" # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE, pois = TRUE, constraints = constraints) } else { # ................................................... # Data pattern is argument Y X <- Y # Realisations of binomial process # will be generated by runifpoint nY <- npoints(Y) Ywin <- as.owin(Y) Ymarx <- marks(Y) # expression that will be evaluated simexpr <- if(is.null(Ymarx)) { ## unmarked expression(runifpoint(nY, Ywin)) } else if(is.null(dim(Ymarx))) { ## single column of marks expression({ A <- runifpoint(nY, Ywin); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j] }) } else { ## multiple columns of marks expression({ A <- runifpoint(nY, Ywin); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j, ,drop=FALSE] }) } dont.complain.about(nY, Ywin) # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE, pois = TRUE, constraints = "with fixed number of points") } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=clipdata, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA, silent=silent, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } ## Code for envelope.ppm, envelope.kppm, envelope.slrm ## is moved to spatstat.model ## ................................................................. ## Engine for simulating and computing envelopes ## ................................................................. 
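#
# envelope.ppp() above constructs a 'simulation recipe' and passes it,
# together with the data pattern X and the summary function 'fun', to
# envelopeEngine() below, which runs the simulations and assembles the
# envelope object.
#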
# # X is the data point pattern, which could be ppp, pp3, ppx etc # X determines the class of pattern expected from the simulations # envelopeEngine <- function(X, fun, simul, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, saveresultof=NULL, weights=NULL, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, maxerr.action=c("fatal", "warn", "null"), internal=NULL, cl=NULL, envir.user=envir.user, expected.arg="r", do.pwrong=FALSE, foreignclass=NULL, collectrubbish=FALSE) { # envir.here <- sys.frame(sys.nframe()) alternative <- match.arg(alternative) maxerr.action <- match.arg(maxerr.action) foreignclass <- as.character(foreignclass) if(length(foreignclass) != 0 && clipdata) { warning(paste("Ignoring clipdata=TRUE:", "I don't know how to clip objects of class", sQuote(paste(foreignclass, collapse=",")))) clipdata <- FALSE } # ---------------------------------------------------------- # Determine Simulation # ---------------------------------------------------------- check.1.integer(nsim) stopifnot(nsim > 1) # Identify class of patterns to be simulated, from data pattern X Xclass <- if(is.ppp(X)) "ppp" else if(is.pp3(X)) "pp3" else if(is.ppx(X)) "ppx" else if(inherits(X, foreignclass)) foreignclass else stop("Unrecognised class of point pattern") Xobjectname <- paste("point pattern of class", sQuote(Xclass)) # Option to use weighted average if(use.weights <- !is.null(weights)) { # weight can be either a numeric vector or a function if(is.numeric(weights)) { compute.weights <- FALSE weightfun <- NULL } else if(is.function(weights)) { compute.weights <- TRUE weightfun <- weights weights <- NULL } else stop("weights should be either a function or a numeric vector") } else compute.weights <- FALSE # Undocumented option to generate patterns only. patterns.only <- identical(internal$eject, "patterns") # Undocumented option to evaluate 'something' for each simulation if(savevalues <- !is.null(saveresultof)) { stopifnot(is.function(saveresultof)) SavedValues <- list() ## might be a function of the pattern only, or both pattern and summary fun result.depends.both <- (length(formals(saveresultof)) >= 2) } # Identify type of simulation from argument 'simul' if(inherits(simul, "simulrecipe")) { # .................................................. # simulation recipe is given simtype <- simul$type simexpr <- simul$expr envir <- simul$envir csr <- simul$csr pois <- simul$pois constraints <- simul$constraints } else { # ................................................... 
# simulation is specified by argument `simulate' to envelope() simulate <- simul # which should be an expression, or a list of point patterns, # or an envelope object, or a function to be applied to the data csr <- FALSE # override if(!is.null(icsr <- internal$csr)) csr <- icsr pois <- csr constraints <- "" # model <- NULL if(inherits(simulate, "envelope")) { # envelope object: see if it contains stored point patterns simpat <- attr(simulate, "simpatterns") if(!is.null(simpat)) simulate <- simpat else stop(paste("The argument", sQuote("simulate"), "is an envelope object but does not contain", "any saved point patterns.")) } if(is.expression(simulate)) { ## The user-supplied expression 'simulate' will be evaluated repeatedly simtype <- "expr" simexpr <- simulate envir <- envir.user } else if(is.function(simulate)) { ## User-supplied function 'simulate' will be repeatedly evaluated on X simtype <- "func" simexpr <- expression(simulate(X)) envir <- envir.here } else if(is.list(simulate) && all(sapply(simulate, inherits, what=Xclass))) { #' The user-supplied list of point patterns will be used simtype <- "list" SimDataList <- simulate #' expression that will be evaluated simexpr <- expression(SimDataList[[i+nerr]]) dont.complain.about(SimDataList) envir <- envir.here #' ensure that `i' is defined i <- 1L nerr <- 0L maxnerr <- min(length(SimDataList)-nsim, maxnerr) #' any messages? if(!is.null(mess <- attr(simulate, "internal"))) { # determine whether these point patterns are realisations of CSR csr <- isTRUE(mess$csr) } } else if(is.list(simulate) && all(sapply(simulate, is.list)) && all(lengths(simulate) == 1) && all(sapply((elements <- lapply(simulate, "[[", i=1)), inherits, what=Xclass))) { #' malformed argument: list(list(ppp), list(ppp), ....) SimDataList <- elements simtype <- "list" #' expression that will be evaluated simexpr <- expression(SimDataList[[i+nerr]]) dont.complain.about(SimDataList) envir <- envir.here #' ensure that `i' is defined i <- 1L nerr <- 0L maxnerr <- min(length(SimDataList)-nsim, maxnerr) #' any messages? 
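      #' attr(simulate, "internal")$csr, if supplied, records that the
      #' patterns are realisations of CSR; this later permits use of the
      #' theoretical value of the summary function.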
if(!is.null(mess <- attr(simulate, "internal"))) { # determine whether these point patterns are realisations of CSR csr <- isTRUE(mess$csr) } } else stop(paste(sQuote("simulate"), "should be an expression,", "or a list of point patterns of the same kind as X")) } # ------------------------------------------------------------------- # Determine clipping window # ------------------------------------------------------------------ if(clipdata) { # Generate one realisation Xsim <- eval(simexpr, envir=envir) if(!inherits(Xsim, Xclass)) switch(simtype, csr=stop(paste("Internal error:", Xobjectname, "not generated")), rmh=stop(paste("Internal error: rmh did not return an", Xobjectname)), kppm=stop(paste("Internal error: simulate.kppm did not return an", Xobjectname)), slrm=stop(paste("Internal error: simulate.slrm did not return an", Xobjectname)), expr=stop(paste("Evaluating the expression", sQuote("simulate"), "did not yield an", Xobjectname)), func=stop(paste("Evaluating the function", sQuote("simulate"), "did not yield an", Xobjectname)), list=stop(paste("Internal error: list entry was not an", Xobjectname)), stop(paste("Internal error:", Xobjectname, "not generated")) ) # Extract window clipwin <- Xsim$window if(!is.subset.owin(clipwin, X$window)) warning("Window containing simulated patterns is not a subset of data window") } # ------------------------------------------------------------------ # Summary function to be applied # ------------------------------------------------------------------ if(is.null(fun)) stop("Internal error: fun is NULL") # Name of function, for error messages fname <- if(is.name(substitute(fun))) short.deparse(substitute(fun)) else if(is.character(fun)) fun else "fun" fname <- sQuote(fname) # R function to apply if(is.character(fun)) { gotfun <- try(get(fun, mode="function")) if(inherits(gotfun, "try-error")) stop(paste("Could not find a function named", sQuote(fun))) fun <- gotfun } else if(!is.function(fun)) stop(paste("unrecognised format for function", fname)) fargs <- names(formals(fun)) if(!any(c(expected.arg, "...") %in% fargs)) stop(paste(fname, "should have", ngettext(length(expected.arg), "an argument", "arguments"), "named", commasep(sQuote(expected.arg)), "or a", sQuote("..."), "argument")) usecorrection <- any(c("correction", "...") %in% fargs) # --------------------------------------------------------------------- # validate other arguments if((nrank %% 1) != 0) stop("nrank must be an integer") if((nsim %% 1) != 0) stop("nsim must be an integer") stopifnot(nrank > 0 && nrank < nsim/2) arg.given <- any(expected.arg %in% names(list(...))) if(tran <- !is.null(transform)) { stopifnot(is.expression(transform)) # prepare expressions to be evaluated each time transform.funX <- inject.expr("with(funX,.)", transform) transform.funXsim <- inject.expr("with(funXsim,.)", transform) # .... old code using 'eval.fv' ...... 
# transform.funX <- dotexpr.to.call(transform, "funX", "eval.fv") # transform.funXsim <- dotexpr.to.call(transform, "funXsim", "eval.fv") # 'transform.funX' and 'transform.funXsim' are unevaluated calls to eval.fv } if(!is.null(ginterval)) stopifnot(is.numeric(ginterval) && length(ginterval) == 2) # --------------------------------------------------------------------- # Evaluate function for data pattern X # ------------------------------------------------------------------ Xarg <- if(!clipdata) X else X[clipwin] corrx <- if(usecorrection) list(correction="best") else NULL dont.complain.about(Xarg) funX <- do.call(fun, resolve.defaults(list(quote(Xarg)), list(...), funYargs, corrx)) if(!inherits(funX, "fv")) stop(paste("The function", fname, "must return an object of class", sQuote("fv"))) ## catch 'conservation' parameters conserveargs <- attr(funX, "conserve") if(!is.null(conserveargs) && !any(c("conserve", "...") %in% fargs)) stop(paste("In this usage, the function", fname, "should have an argument named 'conserve' or '...'")) ## warn about 'dangerous' arguments if(!is.null(dang <- attr(funX, "dangerous")) && any(uhoh <- dang %in% names(list(...)))) { nuh <- sum(uhoh) warning(paste("Envelope may be invalid;", ngettext(nuh, "argument", "arguments"), commasep(sQuote(dang[uhoh])), ngettext(nuh, "appears", "appear"), "to have been fixed."), call.=FALSE) } argname <- fvnames(funX, ".x") valname <- fvnames(funX, ".y") has.theo <- "theo" %in% fvnames(funX, "*") csr.theo <- csr && has.theo use.theory <- if(is.null(use.theory)) csr.theo else (use.theory && has.theo) if(tran) { # extract only the recommended value if(use.theory) funX <- funX[, c(argname, valname, "theo")] else funX <- funX[, c(argname, valname)] # apply the transformation to it funX <- eval(transform.funX) } argvals <- funX[[argname]] # fX <- funX[[valname]] arg.desc <- attr(funX, "desc")[match(argname, colnames(funX))] # default domain over which to maximise alim <- attr(funX, "alim") if(global && is.null(ginterval)) ginterval <- if(arg.given || is.null(alim)) range(argvals) else alim #-------------------------------------------------------------------- # Determine number of simulations # ------------------------------------------------------------------ # ## determine whether dual simulations are required ## (one set of simulations to calculate the theoretical mean, ## another independent set of simulations to obtain the critical point.) 
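  ## 'dual' is TRUE when global envelopes are requested and neither the
  ## theoretical mean nor a variance-based envelope is available, so that
  ## Nsim = nsim + nsim2 simulations are generated in total.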
dual <- (global && !use.theory && !VARIANCE) if(dual) { check.1.integer(nsim2) stopifnot(nsim2 >= 1) } Nsim <- if(!dual) nsim else (nsim + nsim2) # if taking data from a list of point patterns, # check there are enough of them if(simtype == "list" && Nsim > length(SimDataList)) stop(paste("Number of simulations", paren(if(!dual) paste(nsim) else paste(nsim, "+", nsim2, "=", Nsim) ), "exceeds number of point pattern datasets supplied")) # Undocumented secret exit # ------------------------------------------------------------------ if(patterns.only) { # generate simulated realisations and return only these patterns if(verbose) { action <- if(simtype == "list") "Extracting" else "Generating" descrip <- switch(simtype, csr = "simulations of CSR", rmh = paste("simulated realisations of fitted", if(pois) "Poisson" else "Gibbs", "model"), kppm = "simulated realisations of fitted cluster model", slrm = "simulated realisations of spatial logistic regression model", expr = "simulations by evaluating expression", func = "simulations by evaluating function", list = "point patterns from list", "simulated realisations") if(!is.null(constraints) && nzchar(constraints)) descrip <- paste(descrip, constraints) explan <- if(dual) paren(paste(nsim2, "to estimate the mean and", nsim, "to calculate envelopes")) else "" splat(action, Nsim, descrip, explan, "...") } XsimList <- list() # start simulation loop sstate <- list() for(i in 1:Nsim) { if(verbose) sstate <- progressreport(i, Nsim, state=sstate) Xsim <- eval(simexpr, envir=envir) if(!inherits(Xsim, Xclass)) switch(simtype, csr={ stop(paste("Internal error:", Xobjectname, "not generated")) }, rmh={ stop(paste("Internal error: rmh did not return an", Xobjectname)) }, kppm={ stop(paste("Internal error: simulate.kppm did not return an", Xobjectname)) }, slrm={ stop(paste("Internal error: simulate.slrm did not return an", Xobjectname)) }, expr={ stop(paste("Evaluating the expression", sQuote("simulate"), "did not yield an", Xobjectname)) }, func={ stop(paste("Evaluating the function", sQuote("simulate"), "did not yield an", Xobjectname)) }, list={ stop(paste("Internal error: list entry was not an", Xobjectname)) }, stop(paste("Internal error:", Xobjectname, "not generated")) ) XsimList[[i]] <- Xsim } if(verbose) { cat(paste("Done.\n")) flush.console() } attr(XsimList, "internal") <- list(csr=csr) return(XsimList) } # capture main decision parameters envelopeInfo <- list(call=cl, Yname=Yname, valname=valname, csr=csr, csr.theo=csr.theo, use.theory=use.theory, pois=pois, simtype=simtype, constraints=constraints, nrank=nrank, nsim=nsim, Nsim=Nsim, global=global, ginterval=ginterval, dual=dual, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, alternative=alternative, scale=scale, clamp=clamp, use.weights=use.weights, do.pwrong=do.pwrong) # ---------------------------------------- ######### SIMULATE ####################### # ---------------------------------------- if(verbose) { action <- if(simtype == "list") "Extracting" else "Generating" descrip <- switch(simtype, csr = "simulations of CSR", rmh = paste("simulated realisations of fitted", if(pois) "Poisson" else "Gibbs", "model"), kppm = "simulated realisations of fitted cluster model", slrm = "simulated realisations of fitted spatial logistic regression model", expr = "simulations by evaluating expression", func = "simulations by evaluating function", list = "point patterns from list", "simulated patterns") if(!is.null(constraints) && nzchar(constraints)) descrip <- paste(descrip, constraints) explan <- if(dual) 
paren(paste(nsim2, "to estimate the mean and", nsim, "to calculate envelopes")) else "" splat(action, Nsim, descrip, explan, "...") } # determine whether simulated point patterns should be saved catchpatterns <- savepatterns && simtype != "list" Caughtpatterns <- list() # allocate space for computed function values nargvals <- length(argvals) simvals <- matrix(, nrow=nargvals, ncol=Nsim) # allocate space for weights to be computed if(compute.weights) weights <- numeric(Nsim) # inferred values of function argument 'r' or equivalent parameters if(identical(expected.arg, "r") && identical(argname, "r")) { # Kest, etc inferred.r.args <- list(r=argvals) } else if(identical(expected.arg, c("rmax", "nrval"))) { # K3est, etc inferred.r.args <- list(rmax=max(argvals), nrval=length(argvals)) } else if(any(c(argname, "...") %in% fargs)) { ## assume it accepts the vector of argument values inferred.r.args <- structure(list(argvals), names=argname) } else { stop(paste("Don't know how to infer values of", if(length(expected.arg)) commasep(sQuote(expected.arg)) else "function argument")) } # arguments for function when applied to simulated patterns funargs <- resolve.defaults(funargs, inferred.r.args, list(...), conserveargs, if(usecorrection) list(correction="best") else NULL) # reject simulated pattern if function values are all NA (etc) rejectNA <- isTRUE(rejectNA) # start simulation loop nerr <- 0 gaveup <- FALSE if(verbose) pstate <- list() for(i in 1:Nsim) { ## safely generate a random pattern and apply function success <- FALSE while(!success && !gaveup) { Xsim <- eval(simexpr, envir=envir) ## check valid point pattern if(!inherits(Xsim, Xclass)) switch(simtype, csr=stop(paste("Internal error:", Xobjectname, "not generated")), rmh=stop(paste("Internal error: rmh did not return an", Xobjectname)), kppm=stop(paste("Internal error:", "simulate.kppm did not return an", Xobjectname)), slrm=stop(paste("Internal error:", "simulate.slrm did not return an", Xobjectname)), expr=stop(paste("Evaluating the expression", sQuote("simulate"), "did not yield an", Xobjectname)), func=stop(paste("Evaluating the function", sQuote("simulate"), "did not yield an", Xobjectname)), list=stop(paste("Internal error: list entry was not an", Xobjectname)), stop(paste("Internal error:", Xobjectname, "not generated")) ) if(catchpatterns) Caughtpatterns[[i]] <- Xsim if(savevalues && !result.depends.both) SavedValues[[i]] <- saveresultof(Xsim) if(compute.weights) { wti <- weightfun(Xsim) if(!is.numeric(wti)) stop("weightfun did not return a numeric value") if(length(wti) != 1L) stop("weightfun should return a single numeric value") weights[i] <- wti } ## apply function safely funXsim <- try(do.call(fun, c(list(Xsim), funargs)), silent=silent) success <- !inherits(funXsim, "try-error") && inherits(funXsim, "fv") && (!rejectNA || any(is.finite(funXsim[[valname]]))) if(!success) { #' error in computing summary function nerr <- nerr + 1L if(nerr > maxnerr) { gaveup <- TRUE errtype <- if(rejectNA) "fatal errors or NA function values" if(simtype == "list") { whinge <- paste("Exceeded maximum possible number of errors", "when evaluating summary function:", length(SimDataList), "patterns provided,", nsim, "patterns required,", nerr, ngettext(nerr, "pattern", "pattern"), "rejected due to", errtype) } else { whinge <- paste("Exceeded maximum permissible number of", errtype, paren(paste("maxnerr =", maxnerr)), "when evaluating summary function", "for simulated point patterns") } switch(maxerr.action, fatal = stop(whinge, call.=FALSE), warn 
= warning(whinge, call.=FALSE), null = {}) } else if(!silent) cat("[retrying]\n") } #' ..... end of while(!success) ................ } if(gaveup) break; # exit loop now ## sanity checks if(i == 1L) { if(!inherits(funXsim, "fv")) stop(paste("When applied to a simulated pattern, the function", fname, "did not return an object of class", sQuote("fv"))) argname.sim <- fvnames(funXsim, ".x") valname.sim <- fvnames(funXsim, ".y") if(argname.sim != argname) stop(paste("The objects returned by", fname, "when applied to a simulated pattern", "and to the data pattern", "are incompatible. They have different argument names", sQuote(argname.sim), "and", sQuote(argname), "respectively")) if(valname.sim != valname) stop(paste("When", fname, "is applied to a simulated pattern", "it provides an estimate named", sQuote(valname.sim), "whereas the estimate for the data pattern is named", sQuote(valname), ". Try using the argument", sQuote("correction"), "to make them compatible")) rfunX <- with(funX, ".x") rfunXsim <- with(funXsim, ".x") if(!identical(rfunX, rfunXsim)) stop(paste("When", fname, "is applied to a simulated pattern,", "the values of the argument", sQuote(argname.sim), "are different from those used for the data.")) } if(savevalues && result.depends.both) SavedValues[[i]] <- saveresultof(Xsim, funXsim) if(tran) { # extract only the recommended value if(use.theory) funXsim <- funXsim[, c(argname, valname, "theo")] else funXsim <- funXsim[, c(argname, valname)] # apply the transformation to it funXsim <- eval(transform.funXsim) } # extract the values for simulation i simvals.i <- funXsim[[valname]] if(length(simvals.i) != nargvals) stop("Vectors of function values have incompatible lengths") simvals[ , i] <- funXsim[[valname]] if(verbose) pstate <- progressreport(i, Nsim, state=pstate) if(collectrubbish) { rm(Xsim) rm(funXsim) gc() } } ## end simulation loop if(verbose) { cat("\nDone.\n") flush.console() } # ........................................................... # save functions and/or patterns if so commanded if(!gaveup) { if(savefuns) { alldata <- cbind(argvals, simvals) simnames <- paste("sim", 1:Nsim, sep="") colnames(alldata) <- c(argname, simnames) alldata <- as.data.frame(alldata) SimFuns <- fv(alldata, argu=argname, ylab=attr(funX, "ylab"), valu="sim1", fmla= paste(". 
~", argname), alim=attr(funX, "alim"), labl=names(alldata), desc=c(arg.desc, paste("Simulation ", 1:Nsim, sep="")), fname=attr(funX, "fname"), yexp=attr(funX, "yexp"), unitname=unitname(funX)) fvnames(SimFuns, ".") <- simnames } if(savepatterns) SimPats <- if(simtype == "list") SimDataList else Caughtpatterns } ######### COMPUTE ENVELOPES ####################### etype <- if(global) "global" else if(VARIANCE) "variance" else "pointwise" if(dual) { jsim <- 1:nsim jsim.mean <- nsim + 1:nsim2 } else { jsim <- jsim.mean <- NULL } result <- envelope.matrix(simvals, funX=funX, jsim=jsim, jsim.mean=jsim.mean, type=etype, alternative=alternative, scale=scale, clamp=clamp, csr=csr, use.theory=use.theory, nrank=nrank, ginterval=ginterval, nSD=nSD, Yname=Yname, do.pwrong=do.pwrong, weights=weights, gaveup=gaveup) ## tack on envelope information attr(result, "einfo") <- resolve.defaults(envelopeInfo, attr(result, "einfo")) if(!gaveup) { ## tack on functions and/or patterns if so commanded if(savefuns) attr(result, "simfuns") <- SimFuns if(savepatterns) { attr(result, "simpatterns") <- SimPats attr(result, "datapattern") <- X } ## undocumented - tack on values of some other quantity if(savevalues) { attr(result, "simvalues") <- SavedValues attr(result, "datavalue") <- if(result.depends.both) saveresultof(X, funX) else saveresultof(X) } } ## save function weights if(use.weights) attr(result, "weights") <- weights return(result) } plot.envelope <- function(x, ..., main) { if(missing(main)) main <- short.deparse(substitute(x)) shade.given <- ("shade" %in% names(list(...))) shade.implied <- !is.null(fvnames(x, ".s")) if(!(shade.given || shade.implied)) { # ensure x has default 'shade' attribute # (in case x was produced by an older version of spatstat) if(all(c("lo", "hi") %in% colnames(x))) fvnames(x, ".s") <- c("lo", "hi") else warning("Unable to determine shading for envelope") } NextMethod("plot", main=main) } print.envelope <- function(x, ...) 
{ e <- attr(x, "einfo") g <- e$global csr <- e$csr pois <- e$pois if(is.null(pois)) pois <- csr simtype <- e$simtype constraints <- e$constraints nr <- e$nrank nsim <- e$nsim V <- e$VARIANCE fname <- flat.deparse(attr(x, "ylab")) type <- if(V) paste("Pointwise", e$nSD, "sigma") else if(g) "Simultaneous" else "Pointwise" splat(type, "critical envelopes for", fname, "\nand observed value for", sQuote(e$Yname)) if(!is.null(valname <- e$valname) && waxlyrical('extras')) splat("Edge correction:", dQuote(valname)) ## determine *actual* type of simulation descrip <- if(csr) "simulations of CSR" else if(!is.null(simtype)) { switch(simtype, csr="simulations of CSR", rmh=paste("simulations of fitted", if(pois) "Poisson" else "Gibbs", "model"), kppm="simulations of fitted cluster model", slrm="simulations of fitted spatial logistic regression model", expr="evaluations of user-supplied expression", func="evaluations of user-supplied function", list="point pattern datasets in user-supplied list", funs="columns of user-supplied data") } else "simulations of fitted model" if(!is.null(constraints) && nzchar(constraints)) descrip <- paste(descrip, constraints) # splat("Obtained from", nsim, descrip) # if(waxlyrical('extras')) { dual <- isTRUE(e$dual) usetheory <- isTRUE(e$use.theory) hownull <- if(usetheory) { "(known exactly)" } else if(dual) { paste("(estimated from a separate set of", e$nsim2, "simulations)") } else NULL formodel <- if(csr) "for CSR" else NULL if(g) { splat("Envelope based on maximum deviation of", fname, "from null value", formodel, hownull) } else if(dual) { splat("Null value of", fname, formodel, hownull) } if(!is.null(attr(x, "simfuns"))) splat("(All simulated function values are stored)") if(!is.null(attr(x, "simpatterns"))) splat("(All simulated point patterns are stored)") } splat("Alternative:", e$alternative) if(!V && waxlyrical('extras')) { ## significance interpretation! alpha <- if(g) { nr/(nsim+1) } else { 2 * nr/(nsim+1) } splat("Significance level of", if(g) "simultaneous" else "pointwise", "Monte Carlo test:", paste0(if(g) nr else 2 * nr, "/", nsim+1), "=", signif(alpha, 3)) } if(waxlyrical('gory') && !is.null(pwrong <- attr(x, "pwrong"))) { splat("\t[Estimated significance level of pointwise excursions:", paste0("pwrong=", signif(pwrong, 3), "]")) } NextMethod("print") } summary.envelope <- function(object, ...) 
{ e <- attr(object, "einfo") g <- e$global V <- e$VARIANCE nr <- e$nrank nsim <- e$nsim csr <- e$csr pois <- e$pois if(is.null(pois)) pois <- csr simtype <- e$simtype constraints <- e$constraints alternative <- e$alternative use.theory <- e$use.theory has.theo <- "theo" %in% fvnames(object, "*") csr.theo <- csr && has.theo use.theory <- if(is.null(use.theory)) csr.theo else (use.theory && has.theo) fname <- short.deparse(attr(object, "ylab")) type <- if(V) paste("Pointwise", e$nSD, "sigma") else if(g) "Simultaneous" else "Pointwise" splat(type, "critical envelopes for", fname, "\nand observed value for", sQuote(e$Yname)) # determine *actual* type of simulation descrip <- if(csr) "simulations of CSR" else if(!is.null(simtype)) { switch(simtype, csr="simulations of CSR", rmh=paste("simulations of fitted", if(pois) "Poisson" else "Gibbs", "model"), kppm="simulations of fitted cluster model", slrm="simulations of fitted spatial logistic regression model", expr="evaluations of user-supplied expression", func="evaluations of user-supplied function", list="point pattern datasets in user-supplied list", funs="columns of user-supplied data", "simulated point patterns") } else "simulations of fitted model" if(!is.null(constraints) && nzchar(constraints)) descrip <- paste(descrip, constraints) # splat("Obtained from", nsim, descrip) # if(waxlyrical('extras')) { if(!is.null(e$dual) && e$dual) splat("Theoretical (i.e. null) mean value of", fname, "estimated from a separate set of", e$nsim2, "simulations") if(!is.null(attr(object, "simfuns"))) splat("(All", nsim, "simulated function values", "are stored in attr(,", dQuote("simfuns"), ") )") if(!is.null(attr(object, "simpatterns"))) splat("(All", nsim, "simulated point patterns", "are stored in attr(,", dQuote("simpatterns"), ") )") } # splat("Alternative:", alternative) if(V) { # nSD envelopes splat(switch(alternative, two.sided = "Envelopes", "Critical boundary"), "computed as sample mean", switch(alternative, two.sided="plus/minus", less="minus", greater="plus"), e$nSD, "sample standard deviations") } else { # critical envelopes lo.ord <- if(nr == 1L) "minimum" else paste(ordinal(nr), "smallest") hi.ord <- if(nr == 1L) "maximum" else paste(ordinal(nr), "largest") if(g) splat(switch(alternative, two.sided = "Envelopes", "Critical boundary"), "computed as", if(use.theory) "theoretical curve" else "mean of simulations", switch(alternative, two.sided="plus/minus", less="minus", greater="plus"), hi.ord, "simulated value of maximum", switch(alternative, two.sided="absolute", less="negative", greater="positive"), "deviation") else { if(alternative != "less") splat("Upper envelope: pointwise", hi.ord, "of simulated curves") if(alternative != "greater") splat("Lower envelope: pointwise", lo.ord, "of simulated curves") } symmetric <- (alternative == "two.sided") && !g alpha <- if(!symmetric) { nr/(nsim+1) } else { 2 * nr/(nsim+1) } splat("Significance level of Monte Carlo test:", paste0(if(!symmetric) nr else 2 * nr, "/", nsim+1), "=", alpha) } splat("Data:", e$Yname) return(invisible(NULL)) } # envelope.matrix # core functionality to compute envelope values # theory = funX[["theo"]] # observed = fX envelope.matrix <- function(Y, ..., argvals=rvals, rvals=NULL, ## rvals is old name observed=NULL, theory=NULL, funX=NULL, nsim=NULL, nsim2=NULL, jsim=NULL, jsim.mean=NULL, type=c("pointwise", "global", "variance"), alternative=c("two.sided", "less", "greater"), scale = NULL, clamp=FALSE, csr=FALSE, use.theory = csr, nrank=1, ginterval=NULL, nSD=2, savefuns=FALSE, 
check=TRUE, Yname=NULL, argname=NULL, arg.desc=NULL, do.pwrong=FALSE, weights=NULL, precomputed=NULL, gaveup=FALSE) { if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) type <- match.arg(type) alternative <- match.arg(alternative) if(!is.null(funX)) stopifnot(is.fv(funX)) pwrong <- NULL use.weights <- !is.null(weights) cheat <- !is.null(precomputed) if(is.null(argvals) && is.null(observed) && !is.null(funX)) { ## assume funX is summary function for observed data argvals <- with(funX, .x) observed <- with(funX, .y) argname.orig <- fvnames(funX, ".x") if(is.null(argname)) argname <- argname.orig if(is.null(arg.desc)) arg.desc <- attr(funX, "desc")[match(argname.orig, colnames(funX))] theory <- if(use.theory) (theory %orifnull% funX[["theo"]]) else NULL if(check) stopifnot(nrow(funX) == nrow(Y)) } else { ## construct envelope from raw data if(is.null(argname)) argname <- "r" if(is.null(arg.desc)) arg.desc <- "distance argument r" if(check) { ## validate vectors of data if(is.null(argvals)) stop("argvals must be supplied") if(is.null(observed)) stop("observed must be supplied") stopifnot(length(argvals) == nrow(Y)) stopifnot(length(observed) == length(argvals)) } } use.theory <- use.theory && !is.null(theory) if(use.theory && check) stopifnot(length(theory) == length(argvals)) simvals <- Y fX <- observed if(!is.null(funX)) { atr <- attributes(funX) fname <- atr$fname yexp <- atr$yexp } else { fname <- "f" yexp <- substitute(f(r), list(r=as.name(argname))) atr <- list(alim=range(argvals), ylab=yexp, yexp=yexp, fname="f") } NAvector <- rep(NA_real_, length(argvals)) if(!cheat) { ## ................ standard calculation ..................... ## validate weights if(use.weights && !gaveup) check.nvector(weights, ncol(simvals), things="simulated functions", naok=TRUE, vname="weights") ## determine numbers of columns used Ncol <- if(!gaveup) ncol(simvals) else Inf if(Ncol < 2) stop("Need at least 2 columns of function values") ## all columns are used unless 'nsim' or 'jsim' given. if(!(is.null(nsim) && is.null(jsim))) { if(is.null(jsim)) { jsim <- 1:nsim } else if(is.null(nsim)) { nsim <- length(jsim) } else stopifnot(length(jsim) == nsim) if(nsim > Ncol) stop(paste(nsim, "simulations are not available; only", Ncol, "columns provided")) } ## nsim2 or jsim.mean may be given, and imply dual calculation if(!(is.null(nsim2) && is.null(jsim.mean))) { if(is.null(jsim.mean)) { jsim.mean <- setdiff(seq_len(Ncol), jsim)[1:nsim2] } else if(is.null(nsim2)) { nsim2 <- length(jsim.mean) } else stopifnot(length(jsim.mean) == nsim2) if(nsim + nsim2 > Ncol) stop(paste(nsim, "+", nsim2, "=", nsim+nsim2, "simulations are not available; only", Ncol, "columns provided")) if(length(intersect(jsim, jsim.mean))) warning("Internal warning: Indices in jsim and jsim.mean overlap") } restrict.columns <- !is.null(jsim) dual <- !is.null(jsim.mean) } else { ## ................ precomputed values .................. ## validate weights if(use.weights) check.nvector(weights, nsim, things="simulations", naok=TRUE, vname="weights") restrict.columns <- FALSE dual <- FALSE } shadenames <- NULL nsim.mean <- NULL switch(type, pointwise = { ## ....... POINTWISE ENVELOPES ............................... 
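           ## Editor's note (worked example, not part of the original source):
           ## with nrank = 1 the pointwise limits are the minimum and maximum of
           ## the simulated curves at each r; the attained two-sided significance
           ## level is 2*nrank/(nsim+1), e.g. nsim = 99 and nrank = 1 give
           ##   2 * 1 / (99 + 1) = 0.02
           ## (the corresponding global envelope has level 1/(99+1) = 0.01).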
if(gaveup) { lo <- hi <- NAvector } else if(cheat) { stopifnot(checkfields(precomputed, c("lo", "hi"))) lo <- precomputed$lo hi <- precomputed$hi } else { simvals[is.infinite(simvals)] <- NA if(restrict.columns) { simvals <- simvals[,jsim] if(use.weights) weights <- weights[jsim] } nsim <- ncol(simvals) if(nrank == 1L) { lohi <- apply(simvals, 1L, range) } else { lohi <- apply(simvals, 1L, # function(x, n) { sort(x)[n] }, orderstats, k=c(nrank, nsim-nrank+1L)) } lo <- lohi[1L,] hi <- lohi[2L,] } lo.name <- "lower pointwise envelope of %s from simulations" hi.name <- "upper pointwise envelope of %s from simulations" ## if(!gaveup) switch(alternative, two.sided = { }, less = { hi <- rep.int(Inf, length(hi)) hi.name <- "infinite upper limit" }, greater = { lo <- rep.int(-Inf, length(lo)) lo.name <- "infinite lower limit" }) if(use.theory) { results <- data.frame(r=argvals, obs=fX, theo=theory, lo=lo, hi=hi) } else { m <- if(gaveup) NAvector else if(cheat) precomputed$mmean else if(!use.weights) apply(simvals, 1L, mean, na.rm=TRUE) else apply(simvals, 1L, weighted.mean, w=weights, na.rm=TRUE) results <- data.frame(r=argvals, obs=fX, mmean=m, lo=lo, hi=hi) } colnames(results)[1] <- argname shadenames <- c("lo", "hi") if(do.pwrong) { ## estimate the p-value for the 'wrong test' if(gaveup) { pwrong <- NA_real_ } else if(cheat) { pwrong <- precomputed$pwrong do.pwrong <- !is.null(pwrong) && !badprobability(pwrong, FALSE) } else { dataranks <- t(apply(simvals, 1, rank, ties.method="random")) upper.signif <- (dataranks <= nrank) lower.signif <- (dataranks >= nsim-nrank+1L) is.signif <- switch(alternative, less = lower.signif, greater = upper.signif, two.sided = lower.signif | upper.signif) is.signif.somewhere <- matcolany(is.signif) pwrong <- sum(is.signif.somewhere)/nsim } } }, global = { ## ..... SIMULTANEOUS ENVELOPES .......................... if(gaveup) { lo <- hi <- reference <- NAvector } else if(cheat) { ## ... use precomputed values .. stopifnot(checkfields(precomputed, c("lo", "hi"))) lo <- precomputed$lo hi <- precomputed$hi if(use.theory) { reference <- theory } else { stopifnot(checkfields(precomputed, "mmean")) reference <- precomputed$mmean } domain <- rep.int(TRUE, length(argvals)) } else { ## ... 
normal case: compute envelopes from simulations if(!is.null(ginterval)) { domain <- (argvals >= ginterval[1L]) & (argvals <= ginterval[2L]) funX <- funX[domain, ] simvals <- simvals[domain, ] } else domain <- rep.int(TRUE, length(argvals)) simvals[is.infinite(simvals)] <- NA if(use.theory) { reference <- theory[domain] if(restrict.columns) { simvals <- simvals[, jsim] if(use.weights) weights <- weights[jsim] } } else if(dual) { # Estimate the mean from one set of columns # Form envelopes from another set of columns simvals.mean <- simvals[, jsim.mean] # mmean <- reference <- if(!use.weights) apply(simvals.mean, 1L, mean, na.rm=TRUE) else apply(simvals.mean, 1L, weighted.mean, w=weights[jsim.mean], na.rm=TRUE) nsim.mean <- ncol(simvals.mean) # retain only columns used for envelope simvals <- simvals[, jsim] } else { # Compute the mean and envelopes using the same data if(restrict.columns) { simvals <- simvals[, jsim] if(use.weights) weights <- weights[jsim] } # mmean <- reference <- if(!use.weights) apply(simvals, 1L, mean, na.rm=TRUE) else apply(simvals, 1L, weighted.mean, w=weights, na.rm=TRUE) } nsim <- ncol(simvals) # compute deviations deviations <- sweep(simvals, 1L, reference) deviations <- switch(alternative, two.sided = abs(deviations), greater = if(clamp) pmax(0, deviations) else deviations, less = if(clamp) pmax(0, -deviations) else (-deviations)) deviations <- matrix(deviations, nrow=nrow(simvals), ncol=ncol(simvals)) ## rescale ? sc <- 1 if(!is.null(scale)) { stopifnot(is.function(scale)) sc <- scale(argvals) sname <- paste0("scale", paren(argname)) tname <- paste("values of", argname) ans <- check.nvector(sc, length(argvals), things=tname, fatal=FALSE, vname=sname) if(!ans) stop(attr(ans, "whinge"), call.=FALSE) if(any(bad <- (sc <= 0))) { ## issue a warning unless this only happens at r=0 if(any(bad[argvals > 0])) warning(paste("Some values of", sname, "were negative or zero:", "scale was reset to 1 for these values"), call.=FALSE) sc[bad] <- 1 } deviations <- sweep(deviations, 1L, sc, "/") } ## compute max (scaled) deviations suprema <- apply(deviations, 2L, max, na.rm=TRUE) # ranked deviations dmax <- sort(suprema)[nsim-nrank+1L] # simultaneous bands lo <- reference - sc * dmax hi <- reference + sc * dmax } lo.name <- "lower critical boundary for %s" hi.name <- "upper critical boundary for %s" if(!gaveup) switch(alternative, two.sided = { }, less = { hi <- rep.int(Inf, length(hi)) hi.name <- "infinite upper boundary" }, greater = { lo <- rep.int(-Inf, length(lo)) lo.name <- "infinite lower boundary" }) if(use.theory) { results <- data.frame(r=argvals[domain], obs=fX[domain], theo=reference, lo=lo, hi=hi) } else { results <- data.frame(r=argvals[domain], obs=fX[domain], mmean=reference, lo=lo, hi=hi) } colnames(results)[1] <- argname shadenames <- c("lo", "hi") if(do.pwrong) warning(paste("Argument", sQuote("do.pwrong=TRUE"), "ignored;", "it is not relevant to global envelopes")) }, variance={ ## ....... POINTWISE MEAN, VARIANCE etc ...................... if(gaveup) { Ef <- varf <- NAvector } else if(cheat) { # .... use precomputed values .... stopifnot(checkfields(precomputed, c("Ef", "varf"))) Ef <- precomputed$Ef varf <- precomputed$varf } else { ## .... 
normal case: compute from simulations simvals[is.infinite(simvals)] <- NA if(restrict.columns) { simvals <- simvals[, jsim] if(use.weights) weights <- weights[jsim] } nsim <- ncol(simvals) if(!use.weights) { Ef <- apply(simvals, 1L, mean, na.rm=TRUE) varf <- apply(simvals, 1L, var, na.rm=TRUE) } else { Ef <- apply(simvals, 1L, weighted.mean, w=weights, na.rm=TRUE) varf <- apply(simvals, 1L, weighted.var, w=weights, na.rm=TRUE) } } if(gaveup) { sd <- stdres <- lo <- hi <- loCI <- hiCI <- NAvector } else { ## derived quantities sd <- sqrt(varf) stdres <- (fX-Ef)/sd stdres[!is.finite(stdres)] <- NA ## critical limits lo <- Ef - nSD * sd hi <- Ef + nSD * sd ## confidence interval loCI <- Ef - nSD * sd/sqrt(nsim) hiCI <- Ef + nSD * sd/sqrt(nsim) } lo.name <- paste("lower", nSD, "sigma critical limit for %s") hi.name <- paste("upper", nSD, "sigma critical limit for %s") loCI.name <- paste("lower", nSD, "sigma confidence bound", "for mean of simulated %s") hiCI.name <- paste("upper", nSD, "sigma confidence bound", "for mean of simulated %s") ## if(!gaveup) switch(alternative, two.sided = { }, less = { hi <- hiCI <- rep.int(Inf, length(hi)) hi.name <- "infinite upper boundary" hiCI.name <- "infinite upper confidence limit" }, greater = { lo <- loCI <- rep.int(-Inf, length(lo)) lo.name <- "infinite lower boundary" loCI.name <- "infinite lower confidence limit" }) ## put together if(use.theory) { results <- data.frame(r=argvals, obs=fX, theo=theory, lo=lo, hi=hi) colnames(results)[1] <- argname shadenames <- c("lo", "hi") morestuff <- data.frame(mmean=Ef, var=varf, res=fX-Ef, stdres=stdres, loCI=loCI, hiCI=hiCI) loCIlabel <- if(alternative == "greater" && !gaveup) "-infinity" else makefvlabel(NULL, NULL, fname, "loCI", argname=argname) hiCIlabel <- if(alternative == "less" && !gaveup) "infinity" else makefvlabel(NULL, NULL, fname, "hiCI", argname=argname) mslabl <- c(makefvlabel(NULL, "bar", fname, argname=argname), makefvlabel("var", "hat", fname, argname=argname), makefvlabel("res", "hat", fname, argname=argname), makefvlabel("stdres", "hat", fname, argname=argname), loCIlabel, hiCIlabel) wted <- if(use.weights) "weighted " else NULL msdesc <- c(paste0(wted, "sample mean of %s from simulations"), paste0(wted, "sample variance of %s from simulations"), "raw residual", "standardised residual", loCI.name, hiCI.name) } else { results <- data.frame(r=argvals, obs=fX, mmean=Ef, lo=lo, hi=hi) colnames(results)[1] <- argname shadenames <- c("lo", "hi") morestuff <- data.frame(var=varf, res=fX-Ef, stdres=stdres, loCI=loCI, hiCI=hiCI) loCIlabel <- if(alternative == "greater" && !gaveup) "-infinity" else makefvlabel(NULL, NULL, fname, "loCI", argname=argname) hiCIlabel <- if(alternative == "less" && !gaveup) "infinity" else makefvlabel(NULL, NULL, fname, "hiCI", argname=argname) mslabl <- c(makefvlabel("var", "hat", fname, argname=argname), makefvlabel("res", "hat", fname, argname=argname), makefvlabel("stdres", "hat", fname, argname=argname), loCIlabel, hiCIlabel) msdesc <- c(paste0(if(use.weights) "weighted " else NULL, "sample variance of %s from simulations"), "raw residual", "standardised residual", loCI.name, hiCI.name) } if(do.pwrong) { ## estimate the p-value for the 'wrong test' if(gaveup) { pwrong <- NA_real_ } else if(cheat) { pwrong <- precomputed$pwrong do.pwrong <- !is.null(pwrong) && !badprobability(pwrong, FALSE) } else { upper.signif <- (simvals > hi) lower.signif <- (simvals < lo) is.signif <- switch(alternative, less = lower.signif, greater = upper.signif, two.sided = lower.signif | 
upper.signif) # is.signif.somewhere <- apply(is.signif, 2, any) is.signif.somewhere <- matcolany(is.signif) pwrong <- sum(is.signif.somewhere)/nsim } } } ) ############ WRAP UP ######################### if(use.theory) { # reference is computed curve `theo' reflabl <- makefvlabel(NULL, NULL, fname, "theo", argname=argname) refdesc <- paste0("theoretical value of %s", if(csr) " for CSR" else NULL) } else { # reference is sample mean of simulations reflabl <- makefvlabel(NULL, "bar", fname, argname=argname) refdesc <- paste0(if(use.weights) "weighted " else NULL, "sample mean of %s from simulations") } lolabl <- if(alternative == "greater" && !gaveup) "-infinity" else makefvlabel(NULL, "hat", fname, "lo", argname=argname) hilabl <- if(alternative == "less"&& !gaveup) "infinity" else makefvlabel(NULL, "hat", fname, "hi", argname=argname) result <- fv(results, argu=argname, ylab=atr$ylab, valu="obs", fmla= paste(". ~", argname), alim=intersect.ranges(atr$alim, range(results[[argname]])), labl=c(argname, makefvlabel(NULL, "hat", fname, "obs", argname=argname), reflabl, lolabl, hilabl), desc=c(arg.desc, "observed value of %s for data pattern", refdesc, lo.name, hi.name), fname=atr$fname, yexp =atr$yexp) # columns to be plotted by default dotty <- c("obs", if(use.theory) "theo" else "mmean", "hi", "lo") if(type == "variance") { # add more stuff result <- bind.fv(result, morestuff, mslabl, msdesc) if(use.theory) dotty <- c(dotty, "mmean") } fvnames(result, ".") <- dotty fvnames(result, ".s") <- shadenames unitname(result) <- unitname(funX) class(result) <- c("envelope", class(result)) # tack on envelope information attr(result, "einfo") <- list(global = (type =="global"), ginterval = ginterval, alternative=alternative, scale = scale, clamp = clamp, csr = csr, use.theory = use.theory, csr.theo = csr && use.theory, simtype = "funs", constraints = "", nrank = nrank, nsim = nsim, VARIANCE = (type == "variance"), nSD = nSD, valname = NULL, dual = dual, nsim = nsim, nsim2 = nsim.mean, Yname = Yname, do.pwrong=do.pwrong, use.weights=use.weights, gaveup = gaveup) # tack on saved functions if(savefuns && !gaveup) { nSim <- ncol(Y) alldata <- cbind(argvals, Y) simnames <- paste("sim", 1:nSim, sep="") colnames(alldata) <- c(argname, simnames) alldata <- as.data.frame(alldata) SimFuns <- fv(alldata, argu=argname, ylab=atr$ylab, valu="sim1", fmla= paste(". ~", argname), alim=atr$alim, labl=names(alldata), desc=c(arg.desc, paste("Simulation ", 1:nSim, sep="")), unitname=unitname(funX)) fvnames(SimFuns, ".") <- simnames attr(result, "simfuns") <- SimFuns } if(do.pwrong) attr(result, "pwrong") <- pwrong if(use.weights) attr(result, "weights") <- weights return(result) } envelope.envelope <- function(Y, fun=NULL, ..., transform=NULL, global=FALSE, VARIANCE=FALSE) { Yname <- short.deparse(substitute(Y)) stopifnot(inherits(Y, "envelope")) Yorig <- Y aargh <- list(...) 
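  ## Editor's note: hedged usage sketch, not part of the original source.
  ## An existing envelope can be recycled if it retained its simulations, e.g.
  ##   E <- envelope(cells, Kest, nsim = 19, savefuns = TRUE, savepatterns = TRUE)
  ##   envelope(E, Gest)                                # re-uses saved patterns
  ##   envelope(E, transform = expression(sqrt(./pi)))  # re-uses saved functions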
X <- attr(Y, "datapattern") sf <- attr(Y, "simfuns") sp <- attr(Y, "simpatterns") wt <- attr(Y, "weights") einfo <- attr(Y, "einfo") csr <- aargh$internal$csr %orifnull% einfo$csr if(is.null(fun) && is.null(sf)) { # No simulated functions - must compute them from simulated patterns if(is.null(sp)) stop(paste("Cannot compute envelope:", "Y does not contain simulated functions", "(was not generated with savefuns=TRUE)", "and does not contain simulated patterns", "(was not generated with savepatterns=TRUE)")) # set default fun=Kest fun <- Kest } if(!is.null(fun)) { # apply new function # point patterns are required if(is.null(sp)) stop(paste("Object Y does not contain simulated point patterns", "(attribute", dQuote("simpatterns"), ");", "cannot apply a new", sQuote("fun"))) if(is.null(X)) stop(paste("Cannot apply a new", sQuote("fun"), "; object Y generated by an older version of spatstat")) ## send signal if simulations were CSR internal <- aargh$internal if(csr) { if(is.null(internal)) internal <- list() internal$csr <- TRUE } ## compute new envelope result <- do.call(envelope, resolve.defaults(list(Y=quote(X), fun=fun, simulate=sp), aargh, list(transform=transform, global=global, VARIANCE=VARIANCE, internal=internal, Yname=Yname, nsim=einfo$nsim, nsim2=einfo$nsim2, weights=wt), .StripNull=TRUE)) } else { #' compute new envelope with existing simulated functions if(is.null(sf)) stop(paste("Y does not contain a", dQuote("simfuns"), "attribute", "(it was not generated with savefuns=TRUE)")) if(!is.null(transform)) { # Apply transformation to Y and sf stopifnot(is.expression(transform)) ## cc <- dotexpr.to.call(transform, "Y", "eval.fv") cc <- inject.expr("with(Y, .)", transform) Y <- eval(cc) ## cc <- dotexpr.to.call(transform, "sf", "eval.fv") cc <- inject.expr("with(sf, .)", transform) sf <- eval(cc) } #' catch discrepancy between domains of observed and simulated functions if(nrow(sf) != nrow(Y)) { rrsim <- sf[[fvnames(sf, ".x")]] rrobs <- Y[[fvnames(Y, ".x")]] ra <- intersect.ranges(range(rrsim), range(rrobs)) delta <- min(mean(diff(rrsim)), mean(diff(rrobs)))/2 oksim <- (rrsim >= ra[1] - delta) & (rrsim <= ra[2] + delta) okobs <- (rrobs >= ra[1] - delta) & (rrobs <= ra[2] + delta) if(sum(oksim) != sum(okobs)) stop("Internal error: Unable to reconcile the domains", "of the observed and simulated functions", call.=FALSE) if(mean(abs(rrsim[oksim] - rrobs[okobs])) > delta) stop("Internal error: Unable to reconcile the r values", "of the observed and simulated functions", call.=FALSE) sf <- sf[oksim, ,drop=FALSE] Y <- Y[okobs, ,drop=FALSE] } # extract simulated function values df <- as.data.frame(sf) rname <- fvnames(sf, ".x") df <- df[, (names(df) != rname)] # interface with 'envelope.matrix' etype <- if(global) "global" else if(VARIANCE) "variance" else "pointwise" dfm <- as.matrix(df) dont.complain.about(dfm) result <- do.call(envelope.matrix, resolve.defaults(list(Y=quote(dfm)), aargh, list(type=etype, csr=csr, funX=Y, Yname=Yname, weights=wt), .StripNull=TRUE)) } if(!is.null(transform)) { # post-process labels labl <- attr(result, "labl") dnames <- colnames(result) dnames <- dnames[dnames %in% fvnames(result, ".")] # expand "." 
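    ## Editor's note (illustrative, not part of the original source):
    ## if dnames is c("obs", "mmean", "lo", "hi") and transform is
    ## expression(sqrt(./pi)), the dot "." expands to cbind(obs, mmean, lo, hi),
    ## so 'expandtransform' constructed below becomes
    ##   sqrt(cbind(obs, mmean, lo, hi)/pi)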
ud <- as.call(lapply(c("cbind", dnames), as.name)) dont.complain.about(ud) expandtransform <- eval(substitute(substitute(tr, list(.=ud)), list(tr=transform[[1L]]))) # compute new labels attr(result, "fname") <- attr(Yorig, "fname") mathlabl <- as.character(fvlegend(result, expandtransform)) # match labels to columns evars <- all.vars(expandtransform) used.dotnames <- evars[evars %in% dnames] mathmap <- match(colnames(result), used.dotnames) okmath <- !is.na(mathmap) # update appropriate labels labl[okmath] <- mathlabl[mathmap[okmath]] attr(result, "labl") <- labl } # Tack on envelope info copyacross <- c("Yname", "csr.theo", "use.theory", "simtype", "constraints") attr(result, "einfo")[copyacross] <- attr(Yorig, "einfo")[copyacross] attr(result, "einfo")$csr <- csr # Save data return(result) } pool.envelope <- local({ pool.envelope <- function(..., savefuns=FALSE, savepatterns=FALSE) { Yname <- short.deparse(sys.call()) if(nchar(Yname) > 60) Yname <- paste(substr(Yname, 1L, 40L), "[..]") Elist <- unname(list(...)) nE <- length(Elist) if(nE == 0) return(NULL) #' ........ validate envelopes ..................... #' All arguments must be envelopes notenv <- !unlist(lapply(Elist, inherits, what="envelope")) if(any(notenv)) { n <- sum(notenv) why <- paste(ngettext(n, "Argument", "Arguments"), commasep(which(notenv)), ngettext(n, "does not", "do not"), "belong to the class", dQuote("envelope")) stop(why) } E1 <- Elist[[1L]] ## Only one envelope? if(nE == 1) return(E1) ## envelopes must be compatible ok <- do.call(compatible, Elist) if(!ok) stop("Envelopes are not compatible") ## find name of function argument argname <- fvnames(E1, ".x") arg.desc <- attr(E1, "desc")[match(argname, colnames(E1))] ## ... reconcile parameters in different envelopes ....... eilist <- lapply(Elist, attr, which="einfo") nrank <- resolveEinfo(eilist, "nrank", 1) global <- resolveEinfo(eilist, "global", FALSE) ginterval <- resolveEinfo(eilist, "ginterval", NULL, atomic=FALSE) VARIANCE <- resolveEinfo(eilist, "VARIANCE", FALSE) alternative <- resolveEinfo(eilist, "alternative", FALSE) scale <- resolveEinfo(eilist, "scale", NULL, atomic=FALSE) clamp <- resolveEinfo(eilist, "clamp", FALSE) resolveEinfo(eilist, "simtype", "funs", "Envelopes were generated using different types of simulation") resolveEinfo(eilist, "constraints", "", "Envelopes were generated using different types of conditioning") resolveEinfo(eilist, "csr.theo", FALSE, NULL) csr <- resolveEinfo(eilist, "csr", FALSE, NULL) use.weights <- resolveEinfo(eilist, "use.weights" , FALSE, "Weights were used in some, but not all, envelopes: they will be ignored") use.theory <- resolveEinfo(eilist, "use.theory", csr, NULL) ## weights <- if(use.weights) unlist(lapply(Elist, attr, which="weights")) else NULL type <- if(global) "global" else if(VARIANCE) "variance" else "pointwise" ## ........ validate saved functions ..................... 
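    ## Editor's note: hedged usage sketch, not part of the original source.
    ## Pooling requires the individual simulated functions to have been saved, e.g.
    ##   E1 <- envelope(cells, Kest, nsim = 20, savefuns = TRUE)
    ##   E2 <- envelope(cells, Kest, nsim = 20, savefuns = TRUE)
    ##   pool(E1, E2)   # pooled envelope based on all 40 simulated curves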
if(savefuns || !VARIANCE) { ## Individual simulated functions are required SFlist <- lapply(Elist, attr, which="simfuns") isnul <- unlist(lapply(SFlist, is.null)) if(any(isnul)) { n <- sum(isnul) comply <- if(!VARIANCE) "compute the envelope:" else "save the simulated functions:" why <- paste("Cannot", comply, ngettext(n, "argument", "arguments"), commasep(which(isnul)), ngettext(n, "does not", "do not"), "contain a", dQuote("simfuns"), "attribute", "(not generated with savefuns=TRUE)") stop(why) } ## Simulated functions must be the same function fnames <- unique(lapply(SFlist, attr, which="fname")) if(length(fnames) > 1L) { fnames <- unlist(lapply(fnames, flatfname)) stop(paste("Envelope objects contain values", "of different functions:", commasep(sQuote(fnames)))) } ## vectors of r values must be identical rlist <- lapply(SFlist, getargvals) argvals <- rlist[[1L]] samer <- unlist(lapply(rlist, identical, y=argvals)) if(!all(samer)) stop(paste("Simulated function values are not compatible", "(different values of function argument)")) ## Extract function values and assemble into one matrix matlist <- lapply(SFlist, getdotvals) SFmatrix <- do.call(cbind, matlist) } ## compute pooled envelope switch(type, pointwise = { result <- envelope(SFmatrix, funX=E1, type=type, alternative=alternative, clamp=clamp, nrank=nrank, csr=csr, use.theory=use.theory, Yname=Yname, weights=weights, savefuns=savefuns) }, global = { simfunmatrix <- if(is.null(ginterval)) SFmatrix else { ## savefuns have not yet been clipped to ginterval ## while envelope data have been clipped. domain <- (argvals >= ginterval[1L]) & (argvals <= ginterval[2L]) SFmatrix[domain, , drop=FALSE] } result <- envelope(simfunmatrix, funX=E1, type=type, alternative=alternative, scale=scale, clamp=clamp, csr=csr, use.theory=use.theory, nrank=nrank, ginterval=ginterval, Yname=Yname, weights=weights, savefuns=savefuns) }, variance = { ## Pool sample means and variances nsims <- unlist(lapply(eilist, getElement, name="nsim")) mmeans <- lapply(Elist, getElement, name="mmean") vars <- lapply(Elist, getElement, name="var") mmeans <- matrix(unlist(mmeans), ncol=nE) vars <- matrix(unlist(vars), ncol=nE) if(!use.weights) { w.mean <- nsims d.mean <- sum(nsims) w.var <- nsims - 1 d.var <- sum(nsims) - 1 } else { weightlist <- lapply(Elist, attr, which="weights") w.mean <- unlist(lapply(weightlist, sum)) d.mean <- sum(w.mean) ssw <- unlist(lapply(weightlist, meansqfrac)) ## meansqfrac : function(x) {sum((x/sum(x))^2)})) w.var <- w.mean * (1 - ssw) d.var <- d.mean * (1 - sum(ssw)) } poolmmean <- as.numeric(mmeans %*% matrix(w.mean/d.mean, ncol=1L)) within <- vars %*% matrix(w.var, ncol=1L) between <- ((mmeans - poolmmean[])^2) %*% matrix(w.mean, ncol=1L) poolvar <- as.numeric((within + between)/d.var) ## feed precomputed data to envelope.matrix pc <- list(Ef=poolmmean[], varf=poolvar[]) nsim <- sum(nsims) result <- envelope.matrix(NULL, funX=E1, type=type, alternative=alternative, csr=csr, Yname=Yname, weights=weights, savefuns=savefuns, nsim=nsim, precomputed=pc) }) ## Copy envelope info that is not handled by envelope.matrix copyacross <- c("Yname", "csr.theo", "use.theory", "simtype", "constraints") attr(result, "einfo")[copyacross] <- attr(E1, "einfo")[copyacross] ## ..............saved patterns ..................... 
if(savepatterns) { SPlist <- lapply(Elist, attr, which="simpatterns") isnul <- unlist(lapply(SPlist, is.null)) if(any(isnul)) { n <- sum(isnul) why <- paste("Cannot save the simulated patterns:", ngettext(n, "argument", "arguments"), commasep(which(isnul)), ngettext(n, "does not", "do not"), "contain a", dQuote("simpatterns"), "attribute", "(not generated with savepatterns=TRUE)") warning(why) } else { attr(result, "simpatterns") <- Reduce(append, SPlist) } } ## ..............saved summary functions ................ if(savefuns) { alldata <- cbind(argvals, SFmatrix) Nsim <- ncol(SFmatrix) simnames <- paste0("sim", 1:Nsim) colnames(alldata) <- c(argname, simnames) alldata <- as.data.frame(alldata) SFtemplate <- SFlist[[1L]] SimFuns <- fv(alldata, argu=argname, ylab=attr(SFtemplate, "ylab"), valu="sim1", fmla=paste(". ~", argname), alim=attr(SFtemplate, "alim"), labl=names(alldata), desc=c(arg.desc, paste("Simulation ", 1:Nsim, sep="")), fname=attr(SFtemplate, "fname"), yexp=attr(SFtemplate, "yexp"), unitname=unitname(SFtemplate)) fvnames(SimFuns, ".") <- simnames attr(result, "simfuns") <- SimFuns } dotnames <- lapply(Elist, fvnames, a=".") dn <- dotnames[[1L]] if(all(unlist(lapply(dotnames, identical, y=dn)))) fvnames(result, ".") <- dn shadenames <- lapply(Elist, fvnames, a=".s") sh <- shadenames[[1L]] if(all(unlist(lapply(shadenames, identical, y=sh)))) fvnames(result, ".s") <- sh return(result) } getargvals <- function(z) { as.matrix(z)[, fvnames(z, ".x")] } getdotvals <- function(z) { as.matrix(z)[, fvnames(z, "."), drop=FALSE] } meansqfrac <- function(x) {sum((x/sum(x))^2)} pool.envelope }) # resolve matching entries in different envelope objects # x is a list of envelope info objects resolveEinfo <- function(x, what, fallback, warn, atomic=TRUE) { if(atomic) { y <- unique(unlist(lapply(x, getElement, name=what))) if(length(y) == 1L) return(y) } else { y <- unique(lapply(x, getElement, name=what)) if(length(y) == 1L) return(y[[1L]]) } if(missing(warn)) warn <- paste("Envelopes were generated using different values", "of argument", paste(sQuote(what), ";", sep=""), "reverting to default value") if(!is.null(warn)) warning(warn, call.=FALSE) return(fallback) } as.data.frame.envelope <- function(x, ..., simfuns=FALSE) { if(simfuns && !is.null(sf <- attr(x, "simfuns"))) { # tack on the simulated functions as well y <- as.data.frame(bind.fv(x, sf, clip=TRUE)) return(y) } NextMethod("as.data.frame") } spatstat.explore/R/localKcross.R0000644000176200001440000003234114611073310016374 0ustar liggesusers#' #' localKcross.R #' #' original by Ege Rubak #' #' $Revision: 1.18 $ $Date: 2023/02/28 02:06:58 $ "localLcross" <- function(X, from, to, ..., rmax = NULL, correction = "Ripley") { localKcross(X, from, to, ..., rmax = rmax, correction = correction, wantL = TRUE) } "localLdot" <- function(X, from, ..., rmax = NULL, correction = "Ripley") { localKdot(X, from, ..., rmax = rmax, correction = correction, wantL = TRUE) } "localKcross" <- function(X, from, to, ..., rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) if(missing(from)) from <- levels(marx)[1] if(missing(to)) to <- levels(marx)[2] I <- (marx == from) if(!any(I)) stop(paste("No points have mark =", from)) Iexplain <- paste("points having mark =", from) Ikey <- make.parseable(paste(from)) if(from == to) { ## use Kest XI <- X[I] dont.complain.about(XI) result <- do.call(localK, resolve.defaults(list(X=quote(XI), 
rmax=rmax, correction=correction, verbose=verbose, rvalue=rvalue), list(...))) } else { J <- (marx == to) if(!any(J)) stop(paste("No points have mark =", to)) Jexplain <- paste("points having mark =", to) Jkey <- make.parseable(paste(to)) result <-localKmultiEngine(X, I, J, ..., Ikey=Ikey, Jkey=Jkey, Iexplain=Iexplain, Jexplain=Jexplain, rmax = rmax, correction=correction, verbose=verbose, rvalue=rvalue) } return(result) } "localKdot" <- function(X, from, ..., rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) if(missing(from)) from <- levels(marx)[1] I <- (marx == from) J <- rep.int(TRUE, X$n) # i.e. all points Iexplain <- paste("points having mark =", from) Jexplain <- "points of any type" Ikey <- make.parseable(paste(from)) Jkey <- "." if(!any(I)) stop(paste("No points have mark =", from)) result <- localKmultiEngine(X, I, J, ..., Iexplain=Iexplain, Jexplain=Jexplain, Ikey=Ikey, Jkey=Jkey, rmax = rmax, correction=correction, verbose=verbose, rvalue=rvalue) attr(result, "indices") <- list(from=from) return(result) } "localKcross.inhom" <- function(X, from, to, lambdaFrom=NULL, lambdaTo=NULL, ..., rmax = NULL, correction = "Ripley", sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) if(missing(from)) from <- levels(marx)[1] if(missing(to)) to <- levels(marx)[2] I <- (marx == from) J <- (marx == to) Iexplain <- paste("points having mark =", from) Jexplain <- paste("points having mark =", to) Ikey <- make.parseable(paste(from)) Jkey <- make.parseable(paste(to)) K <- localKmultiEngine(X, I, J, lambdaFrom, lambdaTo, ..., rmax = rmax, Iexplain=Iexplain, Jexplain=Jexplain, Ikey=Ikey, Jkey=Jkey, correction=correction, sigma=sigma, varcov=varcov, lambdaX=lambdaX, update=update, leaveoneout=leaveoneout) attr(K, "indices") <- list(from=from, to=to) return(K) } localLcross.inhom <- function(X, from, to, lambdaFrom = NULL, lambdaTo = NULL, ..., rmax = NULL) { localKcross.inhom(X, from, to, lambdaFrom, lambdaTo, ..., rmax = rmax, wantL = TRUE) } "localKmultiEngine" <- function(X, from, to, lambdaFrom=NULL, lambdaTo=NULL, ..., rmax = NULL, wantL=FALSE, correction="Ripley", verbose=TRUE, rvalue=NULL, sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE, Iexplain="points satisfying condition I", Jexplain="points satisfying condition J", Ikey="I", Jkey="J") { npts <- npoints(X) W <- Window(X) areaW <- area(W) lambda.ave <- npts/areaW from <- ppsubset(X, from, "from") to <- ppsubset(X, to, "to") if(is.null(from) || is.null(to)) stop("from and to must be valid subset indices") if(!any(from)) stop("no points belong to subset from") if(!any(to)) stop("no points belong to subset to") X_from <- X[from] X_to <- X[to] n_from <- sum(from) n_to <- sum(to) lambdaFrom.ave <- n_from/areaW lambdaTo.ave <- n_to/areaW weighted <- !is.null(lambdaFrom) || !is.null(lambdaTo) || !is.null(lambdaX) if(weighted){ lambdas <- resolve.lambdacross(X, from, to, lambdaFrom, lambdaTo, ..., lambdaX = lambdaX, sigma = sigma, varcov = varcov, leaveoneout = leaveoneout, update = update, Iexplain=Iexplain, Jexplain=Jexplain) lambdaFrom <- lambdas$lambdaI lambdaTo <- lambdas$lambdaJ } if(is.null(rvalue)) rmaxdefault <- rmax %orifnull% rmax.rule("K", W, lambda.ave) else { stopifnot(is.numeric(rvalue)) stopifnot(length(rvalue) == 1) stopifnot(rvalue >= 0) rmaxdefault <- 
rvalue } breaks <- handle.r.b.args(NULL, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max correction.given <- !missing(correction) correction <- pickoption("correction", correction, c(none="none", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=FALSE) correction <- implemented.for.K(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # identify all close pairs rmax <- max(r) close <- crosspairs(X_from, X_to, rmax) # close$i and close$j are serial numbers in X_from and X_to respectively; # map them to original serial numbers in X orig <- seq_len(npts) imap <- orig[from] jmap <- orig[to] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(from & to)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$d <- close$d[ok] close$xi <- close$xi[ok] close$xj <- close$xj[ok] close$yi <- close$yi[ok] close$yj <- close$yj[ok] } } # extract information for these pairs (relative to orderings of X_from, X_to) DIJ <- close$d XI <- ppp(close$xi, close$yi, window=W, check=FALSE) I <- close$i J <- close$j if(weighted) { ## lambdaI <- lambdaFrom[I] ## not used lambdaJ <- lambdaTo[J] ## weightI <- 1/lambdaI ## not used weightJ <- 1/lambdaJ } # initialise df <- as.data.frame(matrix(NA, length(r), n_from)) labl <- desc <- character(n_from) if(verbose) state <- list() switch(correction, none={ # uncorrected! For demonstration purposes only! for(i in 1:n_from) { ii <- (I == i) ## Below wh <- whist(DIJ[ii], breaks$val, if(weighted) weightJ[ii] else NULL) # no edge weights Knone <- cumsum(wh) ## Tweaking factor to express Kcross.inhom as unweighted average of local contrib. if(weighted) Knone <- Knone * lambdaFrom.ave/lambdaFrom[i] df[,i] <- Knone icode <- numalign(i, n_from) names(df)[i] <- paste("un", icode, sep="") labl[i] <- makefvlabel(NULL, "hat", character(2), icode) desc[i] <- paste("uncorrected estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, n_from, state=state) } if(!weighted) df <- df/lambdaTo.ave }, translate={ # Translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) if(weighted) edgewt <- edgewt * weightJ for(i in 1:n_from) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, edgewt[ii]) Ktrans <- cumsum(wh) ## Tweaking factor to express Kcross.inhom as unweighted average of local contrib. if(weighted) Ktrans <- Ktrans * lambdaFrom.ave/lambdaFrom[i] df[,i] <- Ktrans icode <- numalign(i, n_from) names(df)[i] <- paste("trans", icode, sep="") labl[i] <- makefvlabel(NULL, "hat", character(2), icode) desc[i] <- paste("translation-corrected estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, n_from, state=state) } if(!weighted) df <- df/lambdaTo.ave h <- diameter(W)/2 df[r >= h, ] <- NA }, isotropic={ # Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) if(weighted) edgewt <- edgewt * weightJ for(i in 1:n_from) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, edgewt[ii]) Kiso <- cumsum(wh) ## Tweaking factor to express Kcross.inhom as unweighted average of local contrib. 
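              ## Editor's note (hypothetical numbers, for illustration only):
              ## if lambdaFrom.ave = 100 and lambdaFrom[i] = 50, the contribution
              ## of point i is doubled, so that the unweighted column average of
              ## df reproduces the global inhomogeneous cross-type K estimate.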
if(weighted) Kiso <- Kiso * lambdaFrom.ave/lambdaFrom[i] df[,i] <- Kiso icode <- numalign(i, n_from) names(df)[i] <- paste("iso", icode, sep="") labl[i] <- makefvlabel(NULL, "hat", character(2), icode) desc[i] <- paste("Ripley isotropic correction estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, n_from, state=state) } if(!weighted) df <- df/lambdaTo.ave h <- diameter(W)/2 df[r >= h, ] <- NA }) # transform values if L required if(wantL) df <- sqrt(df/pi) # return vector of values at r=rvalue, if desired if(!is.null(rvalue)) { nr <- length(r) if(r[nr] != rvalue) stop("Internal error - rvalue not attained") return(as.numeric(df[nr,])) } ## function value table required ## add r and theo df <- cbind(df, data.frame(r=r, theo=if(wantL) r else (pi * r^2))) desc <- c(desc, c("distance argument r", "theoretical Poisson %s")) labl <- c(labl, c("r", "{%s[%s]^{pois}}(r)")) ## Handle 'dot' symbol if(identical(Jkey, ".")) { Jkeyname <- "symbol(\"\\267\")" Jkeylab <- quote(dot) Jkeyexpr <- quote(symbol("\267")) } else Jkeyname <- Jkeylab <- Jkeyexpr <- Jkey ## Determine fv labels if(!wantL) { if(!weighted) { fnam <- c("K", paste0("list(loc,", Ikey, ",", Jkeyname, ")")) ylab <- substitute(K[loc,I,J](r), list(I=Ikey, J=Jkeylab)) yexp <- substitute(K[list(loc,I,J)](r), list(I=Ikey, J=Jkeyexpr)) } else { fnam <- c("K", paste0("list(inhom,loc,", Ikey, ",", Jkeyname, ")")) ylab <- substitute(K[inhom,loc,I,J](r), list(I=Ikey, J=Jkeylab)) yexp <- substitute(K[list(inhom,loc,I,J)](r), list(I=Ikey, J=Jkeyexpr)) } } else { if(!weighted) { fnam <- c("L", paste0("list(loc,", Ikey, ",", Jkeyname, ")")) ylab <- substitute(L[loc,I,J](r), list(I=Ikey, J=Jkeylab)) yexp <- substitute(L[list(loc,I,J)](r), list(I=Ikey, J=Jkeyexpr)) } else { fnam <- c("L", paste0("list(inhom,loc,", Ikey, ",", Jkeyname, ")")) ylab <- substitute(L[inhom,loc,I,J](r), list(I=Ikey, J=Jkeylab)) yexp <- substitute(L[list(inhom,loc,I,J)](r), list(I=Ikey, J=Jkeyexpr)) } } # create fv object K <- fv(df, "r", ylab, "theo", , alim, labl, desc, fname=fnam, yexp=yexp) # default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) attr(K, "correction") <- correction if(weighted && lambdas$danger) attr(K, "dangerous") <- lambdas$dangerous ### TEMPORARY HACK to save info about the "from" points attr(K, "Xfrom") <- X_from return(K) } spatstat.explore/R/bw.pcf.R0000644000176200001440000001461314611073307015304 0ustar liggesusers#' #' bw.pcf.R #' #' $Revision: 1.7 $ $Date: 2022/05/23 02:33:06 $ #' #' bandwidth selection for pcf #' with least-squares cross-validation method #' #' Original code by: Rasmus Waagepetersen and Abdollah Jalilian #' #' References: #' Guan, Y. (2007). A composite likelihood cross-validation approach in #' selecting bandwidth for the estimation of the pair correlation function. #' Scandinavian Journal of Statistics, 34(2), 336--346. #' DOI: http://doi.org/10.1111/j.1467-9469.2006.00533.x #' Guan, Y. (2007). A least-squares cross-validation bandwidth #' selection approach in pair correlation function estimations. #' Statistics & Probability Letters, 77(18), 1722--1729. 
#' DOI: http://doi.org/10.1016/j.spl.2007.04.016 bw.pcf <- function(X, rmax=NULL, lambda=NULL, divisor="r", kernel="epanechnikov", nr=10000, bias.correct=TRUE, cv.method=c("compLik", "leastSQ"), simple=TRUE, srange=NULL, ..., verbose=FALSE, warn=TRUE) { stopifnot(is.ppp(X)) X <- unmark(X) win <- Window(X) areaW <- area(win) nX <- npoints(X) cv.method <- match.arg(cv.method) kernel <- match.kernel(kernel) #' maximum distance lag: rmax if (is.null(rmax)) rmax <- rmax.rule("K", win, nX/areaW) if(is.null(srange)) srange <- c(0, rmax/4) #' number of subintervals for discretization of [0, rmax]: nr #' length of subintervals discr <- rmax / nr #' breaks of subintervals rs <- seq(0, rmax, length.out= nr + 1) #' closepairs distances: \\ u - v \\ #' Pre-compute close pair distances for use in 'pcf' #' we need close pairs up to a distance rmax + smax #' where 'smax' is the maximum halfwidth of the support of the kernel smax <- srange[2] * (if(kernel == "gaussian") 2 else kernel.factor(kernel)) cpfull <- closepairs(X, rmax + smax, what="all", twice=TRUE) #' For cross-validation, restrict close pairs to distance rmax ok <- (cpfull$d <= rmax) cp <- lapply(cpfull, "[", i=ok) ds <- cp$d #' determining closepairs distances are in which subinterval idx <- round(ds / discr) + 1L idx <- pmin.int(idx, nr+1L) #' translation edge correction factor: /W|/|W \cap W_{u-v}| edgewt <- edge.Trans(dx=cp$dx, dy=cp$dy, W=win, paired=TRUE) if(homogeneous <- is.null(lambda)) { #' homogeneous case lambda <- nX/areaW lambda2area <- lambda^2 * areaW pcfargs <- list(X=X, r=rs, divisor=divisor, kernel=kernel, correction="translate", close=cpfull) renorm.factor <- 1 } else { # inhomogeneous case: lambda is assumed to be a numeric vector giving # the intensity at the points of the point pattern X check.nvector(lambda, nX, vname="lambda") lambda2area <- lambda[cp$i] * lambda[cp$j] * areaW pcfargs <- list(X=X, lambda=lambda, r=rs, divisor=divisor, kernel=kernel, correction="translate", close=cpfull) renorm.factor <- (areaW/sum(1/lambda)) } stuff <- list(cv.method=cv.method, kernel=kernel, homogeneous=homogeneous, bias.correct=bias.correct, simple = simple, discr=discr, rs=rs, cp=cp, ds=ds, idx=idx, edgewt=edgewt, pcfargs=pcfargs, lambda=lambda, lambda2area=lambda2area, renorm.factor=renorm.factor, show=verbose) stuff <- list2env(stuff) #' find optimum bandwidth z <- optimizeWithTrace(CVforPCF, srange, maximum=TRUE, stuff=stuff) #' pack up ox <- order(z$x) sigma <- z$x[ox] cv <- z$y[ox] criterion <- switch(cv.method, compLik = "composite likelihood cross-validation", leastSQ = "least squares cross-validation") result <- bw.optim(cv, sigma, which.max(cv), optimum="max", criterion = criterion, warnextreme=warn, hargnames=c("rmax", "srange"), unitname=unitname(X)) return(result) } CVforPCF <- function(bw, stuff) { stuff$bw <- bw with(stuff, { if(show) splat("bw=", bw) #' values of pair correlation at breaks of subintervals a <- append(pcfargs, list(bw=bw)) grs <- if(homogeneous) do.call(pcf.ppp, a) else do.call(pcfinhom, a) grs <- grs$trans #' bias correction if (bias.correct) { grs <- grs / pkernel(rs, kernel, 0, bw) dcorrec <- pkernel(ds, kernel, 0, bw) } else { dcorrec <- 1 } #' make sure that the estimated pair correlation at origin is finite if (!is.finite(grs[1])) grs[1] <- grs[2] #' approximate the pair correlation values at closepairs distances gds <- grs[idx] if(show) gds.save <- gds #' remove pairs to approximate the cross-validation term: g^{-(u, v)} wt <- edgewt / (2 * pi * ds * lambda2area * dcorrec) * renorm.factor if 
(simple) { gds <- gds - 2 * wt * dkernel(0, kernel, 0, bw) } else { cpi <- cp$i cpj <- cp$j for (k in 1:length(ds)) { exclude <- (cpi == cpi[k]) | (cpj == cpj[k]) gds[k] <- gds[k] - 2 * sum(wt[exclude] * dkernel(ds[k] - ds[exclude], kernel, 0, bw)) } } #' remove negative and zero values gds <- pmax.int(.Machine$double.eps, gds) if(show) { plot(gds.save, gds, xlab="g(d_[ij])", ylab="g(d_[ij])^{-ij}") abline(0,1) splat("range g_after/g_before = ", prange(range(gds/gds.save))) } #' compute value of objective function switch(cv.method, compLik={ #' composite likelihood cross-validation #' the integral term: 2 \pi \int_{0}^{rmax} \hat g(r) r dr normconst <- 2 * pi * sum(grs * rs) * discr value <- mean(log(gds)) - log(normconst) if(show) splat("normconst = ", normconst) }, leastSQ={ #' least squares cross-validation #' the integral term: 2 \pi \int_{0}^{rmax} \hat g^2(r) r dr normconst <- 2 * pi * sum(grs^2 * rs) * discr value <- 2 * sum(gds * edgewt / (lambda2area)) - normconst if(show) splat("normconst = ", normconst) }, stop("Unrecognised cross-validation method")) if(show) splat("value=", value) return(value) }) } spatstat.explore/R/pcfmulti.R0000644000176200001440000001751214611073310015743 0ustar liggesusers# # pcfmulti.R # # $Revision: 1.12 $ $Date: 2023/04/06 00:03:01 $ # # multitype pair correlation functions # pcfcross <- function(X, i, j, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), divisor=c("r","d"), ratio=FALSE) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL divisor <- match.arg(divisor) ## marx <- marks(X) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] I <- (marx == i) J <- (marx == j) Iname <- paste("points with mark i =", i) Jname <- paste("points with mark j =", j) ## result <- pcfmulti(X, I, J, ..., r=r, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, divisor=divisor, Iname=Iname, Jname=Jname, ratio=ratio) ## iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(g[i,j](r), list(i=iname,j=jname)), c("g", paste0("list", paren(paste(iname, jname, sep=",")))), new.yexp=substitute(g[list(i,j)](r), list(i=iname,j=jname))) return(result) } pcfdot <- function(X, i, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), divisor=c("r", "d"), ratio=FALSE) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL divisor <- match.arg(divisor) marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. 
all points Iname <- paste("points with mark i =", i) Jname <- "points" result <- pcfmulti(X, I, J, ..., r=r, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, divisor=divisor, Iname=Iname, Jname=Jname, ratio=ratio) iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(g[i ~ dot](r), list(i=iname)), c("g", paste0(iname, "~symbol(\"\\267\")")), new.yexp=substitute(g[i ~ symbol("\267")](r), list(i=iname))) return(result) } pcfmulti <- function(X, I, J, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor=c("r","d"), Iname="points satisfying condition I", Jname="points satisfying condition J", ratio=FALSE) { verifyclass(X, "ppp") # r.override <- !is.null(r) divisor <- match.arg(divisor) win <- X$window areaW <- area(win) npts <- npoints(X) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("translate", "Ripley") correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) ## .......... indices I and J ............................. I <- ppsubset(X, I, "I") J <- ppsubset(X, J, "J") if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") nI <- sum(I) nJ <- sum(J) if(nI == 0) stop(paste("There are no", Iname)) if(nJ == 0) stop(paste("There are no", Jname)) XI <- X[I] XJ <- X[J] # lambdaI <- nI/areaW lambdaJ <- nJ/areaW nIJ <- sum(I & J) samplesize <- npairs <- nI * nJ - nIJ lambdaIJarea <- npairs/areaW ## ........... kernel bandwidth and support ......................... if(is.null(bw) && kernel=="epanechnikov") { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(lambdaJ) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(lambdaJ) } ########## r values ############################ # handle argument r rmaxdefault <- rmax.rule("K", win, lambdaJ) breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) fname <- c("g", "list(I,J)") yexp <- quote(g[list(I,J)](r)) out <- ratfv(df=df, numer=NULL, denom=samplesize, argu="r", ylab=quote(g[I,J](r)), valu="theo", , alim=alim, labl=c("r", makefvlabel(NULL, NULL, fname, "Pois")), desc=c("distance argument r", "theoretical Poisson %s"), fname=fname, yexp=yexp, ratio=ratio) ########## smoothing parameters for pcf ############################ # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax)) ################################################# ## compute pairwise distances ## identify close pairs of points what <- if(any(correction == "translate")) "all" else "ijd" close <- crosspairs(XI, XJ, rmax+hmax, what=what) ## map (i,j) to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] ## eliminate any identical pairs if(nIJ > 0) { ok <- (iX != jX) if(!all(ok)) close <- 
as.list(as.data.frame(close)[ok, , drop=FALSE]) } ## extract information for these pairs (relative to orderings of XI, XJ) dclose <- close$d icloseI <- close$i # jcloseJ <- close$j ###### compute ####### if(any(correction=="translate")) { # translation correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=win, paired=TRUE) gT <- sewpcf(dclose, edgewt, denargs, lambdaIJarea, divisor)$g out <- bind.ratfv(out, quotient=data.frame(trans=gT), denominator=samplesize, labl=makefvlabel(NULL, "hat", fname, "Trans"), desc="translation-corrected estimate of %s", preferred="trans", ratio=ratio) } if(any(correction=="isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1)) gR <- sewpcf(dclose, edgewt, denargs, lambdaIJarea, divisor)$g out <- bind.ratfv(out, quotient=data.frame(iso=gR), denominator=samplesize, labl=makefvlabel(NULL, "hat", fname, "Ripley"), desc="isotropic-corrected estimate of %s", preferred="iso", ratio=ratio) } ## sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } # which corrections have been computed? corrxns <- rev(setdiff(names(out), "r")) # default is to display them all formula(out) <- . ~ r fvnames(out, ".") <- corrxns # unitname(out) <- unitname(X) return(out) } spatstat.explore/R/relrisk.R0000644000176200001440000005477114611073310015603 0ustar liggesusers# # relrisk.R # # Estimation of relative risk # # $Revision: 1.68 $ $Date: 2023/06/25 02:23:43 $ # relrisk <- function(X, ...) UseMethod("relrisk") relrisk.ppp <- local({ relrisk.ppp <- function(X, sigma=NULL, ..., at=c("pixels", "points"), weights = NULL, varcov=NULL, relative=FALSE, adjust=1, edge=TRUE, diggle=FALSE, se=FALSE, wtype=c("value", "multiplicity"), casecontrol=TRUE, control=1, case, fudge=0) { stopifnot(is.ppp(X)) stopifnot(is.multitype(X)) control.given <- !missing(control) case.given <- !missing(case) at <- match.arg(at) ## evaluate numerical weights (multiple columns not allowed) weights <- pointweights(X, weights=weights, parent=parent.frame()) weighted <- !is.null(weights) ## npts <- npoints(X) marx <- marks(X) imarks <- as.integer(marx) types <- levels(marx) ntypes <- length(types) if(ntypes == 1) stop("Data contains only one type of points") ## casecontrol <- casecontrol && (ntypes == 2) if((control.given || case.given) && !(casecontrol || relative)) { aa <- c("control", "case")[c(control.given, case.given)] nn <- length(aa) warning(paste(ngettext(nn, "Argument", "Arguments"), paste(sQuote(aa), collapse=" and "), ngettext(nn, "was", "were"), "ignored, because relative=FALSE and", if(ntypes==2) "casecontrol=FALSE" else "there are more than 2 types of points")) } ## fudge constant if(missing(fudge) || is.null(fudge)) { fudge <- rep(0, ntypes) } else { check.nvector(fudge, ntypes, things="types of points", oneok=TRUE, vname="fudge") stopifnot(all(fudge >= 0)) if(length(fudge) == 1) fudge <- rep(fudge, ntypes) } ## initialise error report uhoh <- NULL ## prepare for analysis Y <- split(X) splitweights <- if(weighted) split(weights, marx) else rep(list(NULL), ntypes) uX <- unmark(X) ## compute bandwidth (default bandwidth selector is bw.relrisk) ker <- resolve.2D.kernel(..., sigma=sigma, varcov=varcov, adjust=adjust, bwfun=bw.relrisk, x=X) sigma <- ker$sigma varcov <- ker$varcov ## determine smoothing parameters if(bandwidth.is.infinite(sigma)) edge <- FALSE SmoothPars <- resolve.defaults(list(sigma=sigma, varcov=varcov, at=at, edge=edge, diggle=diggle), list(...)) ## threshold for 0/0 tinythresh <- 8 * 
.Machine$double.eps ## if(se) { ## standard error calculation wtype <- match.arg(wtype) weightspower <- if(is.null(weights)) NULL else switch(wtype, value = weights^2, multiplicity = weights) if(!is.null(weights) && wtype == "multiplicity" && min(weights) < 0) stop("Negative weights are not permitted when wtype='multiplicity'", call.=FALSE) ## determine smoothing parameters for variance calculation VarPars <- SmoothPars VarPars$edge <- VarPars$diggle <- FALSE kernel <- SmoothPars$kernel %orifnull% "gaussian" if(!identical(kernel, "gaussian")) { ## Any kernel other than Gaussian. ## The square of the kernel will be computed inside second.moment.engine VarPars$kerpow <- 2 varconst <- 1 } else { ## Gaussian kernel. ## Use the fact that the square of the Gaussian kernel ## is a rescaled Gaussian kernel. if(bandwidth.is.infinite(sigma)) { varconst <- 1 } else if(is.null(varcov)) { varconst <- 1/(4 * pi * prod(sigma)) VarPars$sigma <- sigma/sqrt(2) } else { varconst <- 1/(4 * pi * sqrt(det(varcov))) VarPars$varcov <- varcov/2 } } if(edge) { ## evaluate edge correction weights edgeim <- do.call(second.moment.calc, append(list(x=uX, what="edge"), SmoothPars)) if(diggle || at == "points") { edgeX <- safelookup(edgeim, uX, warn=FALSE) invmassX <- 1/edgeX invmassX[!is.finite(invmassX)] <- 0 } edgeim <- edgeim[Window(X), drop=FALSE] } } ## ......................................... ## compute intensity estimates for each type ## ......................................... switch(at, pixels = { ## intensity estimates of each type Deach <- do.call(density.splitppp, append(list(x=Y, weights=splitweights), SmoothPars)) if(any(fudge != 0)) { ## add constant to intensity estimates Deach <- as.imlist(mapply("+", Deach, as.list(fudge), SIMPLIFY=FALSE)) } ## compute intensity estimate for unmarked pattern Dall <- im.apply(Deach, sum, check=FALSE) ## WAS: Dall <- Reduce("+", Deach) ## variance terms if(se) { ## weights on each data point for variance calculation VarWeights <- if(!edge) { ## no edge correction weightspower } else if(!diggle) { ## uniform edge correction e(u) weightspower } else { ## Jones-Diggle edge correction e(x_i) if(weighted) {invmassX^2 * weightspower} else invmassX^2 } VarWeightsSplit <- if(weighted) split(VarWeights, marx) else NULL ## Compute variance of sum of weighted contributions Veach <- do.call(density.splitppp, append(list(x=Y, weights=VarWeightsSplit), VarPars)) if(edge && !diggle) { ## uniform edge correction e(u): rescale Veach <- imagelistOp(Veach, edgeim^2, "/") #' Ops.imlist not yet working } if(varconst != 1) Veach <- imagelistOp(Veach, varconst, "*") #' Ops.imlist not yet working Vall <- im.apply(Veach, sum, check=FALSE) ## WAS: Vall <- Reduce("+", Veach) } }, points = { ## intensity estimates of each type **at each data point** ## dummy variable matrix dumm <- matrix(0, npts, ntypes) dumm[cbind(seq_len(npts), imarks)] <- 1 colnames(dumm) <- types dummweights <- if(weighted) dumm * weights else dumm dummweightspower <- if(weighted) dumm * weightspower else dumm Deach <- do.call(density.ppp, append(list(x=uX, weights=dummweights), SmoothPars)) ## add constant to intensity estimates if(any(fudge != 0)) Deach <- Deach + matrix(fudge[col(Deach)], nrow=nrow(Deach), ncol=ncol(Deach)) ## compute intensity estimate for unmarked pattern Dall <- rowSums(Deach) ## variance terms if(se) { ## weights attached to data points for variance calculation VarWeights <- if(!edge) { ## no edge correction dummweightspower } else if(!diggle) { ## uniform edge correction e(u) dummweightspower } else 
{ ## Jones-Diggle edge correction e(x_i) dummweightspower * invmassX^2 } ## compute sum of weighted contributions Veach <- do.call(density.ppp, append(list(x=uX, weights=VarWeights), VarPars)) if(edge && !diggle) { ## uniform edge correction e(u) Veach <- Veach * invmassX^2 } if(varconst != 1) Veach <- Veach * varconst Vall <- rowSums(Veach) } }) ## ......................................... ## compute probabilities/risks ## ......................................... if(ntypes == 2 && casecontrol) { if(control.given || !case.given) { stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:2) } else if(is.character(control)) { icontrol <- match(control, types) if(is.na(icontrol)) stop(paste("No points have mark =", sQuote(control))) } else stop(paste("Unrecognised format for argument", sQuote("control"))) if(!case.given) icase <- 3 - icontrol } if(case.given) { stopifnot(length(case) == 1) if(is.numeric(case)) { icase <- case <- as.integer(case) stopifnot(case %in% 1:2) } else if(is.character(case)) { icase <- match(case, types) if(is.na(icase)) stop(paste("No points have mark =", sQuote(case))) } else stop(paste("Unrecognised format for argument", sQuote("case"))) if(!control.given) icontrol <- 3 - icase } ## compute ...... switch(at, pixels = { ## compute probability of case Dcase <- Deach[[icase]] pcase <- Dcase/Dall ## correct small numerical errors pcase <- clamp01(pcase) ## trap NaN values, and similar dodgy <- (Dall < tinythresh) nbg <- badvalues(pcase) | really(dodgy) if(any(nbg)) { warning(paste("Numerical underflow detected:", "sigma is probably too small"), call.=FALSE) uhoh <- unique(c(uhoh, "underflow")) ## apply l'Hopital's rule: ## p(case) = 1{nearest neighbour is case} distcase <- distmap(Y[[icase]], xy=pcase) distcontrol <- distmap(Y[[icontrol]], xy=pcase) closecase <- eval.im(as.integer(distcase < distcontrol)) pcase[nbg] <- closecase[nbg] } if(!relative) { if(!se) { result <- pcase } else { Vcase <- Veach[[icase]] NUM <- eval.im(Vcase * (1-2*pcase) + Vall * pcase^2) SE <- eval.im(sqrt(pmax(NUM, 0))/Dall) result <- solist(estimate=pcase, SE=SE) } } else { rcase <- eval.im(ifelse(pcase < 1, pcase/(1-pcase), NA)) if(!se) { result <- rcase } else { Vcase <- Veach[[icase]] Vctrl <- Veach[[icontrol]] Dctrl <- Deach[[icontrol]] NUM <- eval.im(Vcase + Vctrl * rcase^2) SE <- eval.im(sqrt(pmax(NUM, 0))/Dctrl) result <- solist(estimate=rcase, SE=SE) } } }, points={ ## compute probability of case pcase <- Deach[,icase]/Dall ## correct small numerical errors pcase <- clamp01(pcase) ## trap NaN values dodgy <- (Dall < tinythresh) if(any(nbg <- badvalues(pcase) | really(dodgy))) { warning(paste("Numerical underflow detected:", "sigma is probably too small"), call.=FALSE) uhoh <- unique(c(uhoh, "underflow")) ## apply l'Hopital's rule nntype <- imarks[nnwhich(X)] pcase[nbg] <- as.integer(nntype[nbg] == icase) } if(!relative) { if(!se) { result <- pcase } else { NUM <- Veach[,icase] * (1-2*pcase) + Vall * pcase^2 SE <- sqrt(pmax(NUM, 0))/Dall result <- list(estimate=pcase, SE=SE) } } else { rcase <- ifelse(pcase < 1, pcase/(1-pcase), NA) if(!se) { result <- rcase } else { NUM <- Veach[,icase] + Veach[,icontrol] * rcase^2 SE <- sqrt(pmax(NUM, 0))/Deach[,icontrol] result <- list(estimate=rcase, SE=SE) } } }) } else { ## several types if(relative) { ## need 'control' type stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:ntypes) } else 
if(is.character(control)) { icontrol <- match(control, types) if(is.na(icontrol)) stop(paste("No points have mark =", sQuote(control))) } else stop(paste("Unrecognised format for argument", sQuote("control"))) } switch(at, pixels={ #' Ops.imagelist not yet working probs <- imagelistOp(Deach, Dall, "/") ## correct small numerical errors probs <- as.solist(lapply(probs, clamp01)) ## trap NaN values nbg <- lapply(probs, badvalues) nbg <- Reduce("|", nbg) dodgy <- (Dall < tinythresh) nbg <- nbg | really(dodgy) if(any(nbg)) { warning(paste("Numerical underflow detected:", "sigma is probably too small"), call.=FALSE) uhoh <- unique(c(uhoh, "underflow")) ## apply l'Hopital's rule distX <- distmap(X, xy=Dall) whichnn <- attr(distX, "index") typenn <- eval.im(imarks[whichnn]) typennsub <- as.matrix(typenn)[nbg] for(k in seq_along(probs)) probs[[k]][nbg] <- (typennsub == k) } if(!relative) { if(!se) { result <- probs } else { SE <- list() for(i in 1:ntypes) { NUM <- (Veach[[i]] * (1 - 2 * probs[[i]]) + Vall * probs[[i]]^2) SE[[i]] <- eval.im(sqrt(pmax(NUM, 0))/Dall) } SE <- as.solist(SE) names(SE) <- types result <- list(estimate=probs, SE=SE) } } else { risks <- as.solist(lapply(probs, divideifpositive, d = probs[[icontrol]])) if(!se) { result <- risks } else { Vctrl <- Veach[[icontrol]] Dctrl <- Deach[[icontrol]] SE <- list() for(i in 1:ntypes) { NUM <- Veach[[i]] + Vctrl * risks[[i]]^2 SE[[i]] <- eval.im(sqrt(pmax(NUM, 0))/Dctrl) } SE <- as.solist(SE) names(SE) <- types result <- list(estimate=risks, SE=SE) } } }, points = { probs <- Deach/Dall ## correct small numerical errors probs <- clamp01(probs) ## trap NaN values dodgy <- (Dall < tinythresh) bad <- badvalues(probs) badrow <- matrowany(bad) | really(dodgy) if(any(badrow)) { warning(paste("Numerical underflow detected:", "sigma is probably too small"), call.=FALSE) uhoh <- unique(c(uhoh, "underflow")) ## apply l'Hopital's rule typenn <- imarks[nnwhich(X)] probs[badrow, ] <- (typenn == col(result))[badrow, ] } if(!relative) { if(!se) { result <- probs } else { NUM <- Veach * (1-2*probs) + Vall * probs^2 SE <- sqrt(pmax(NUM, 0))/Dall result <- list(estimate=probs, SE=SE) } } else { risks <- probs/probs[,icontrol] if(!se) { result <- risks } else { NUM <- Veach + Veach[,icontrol] * risks^2 NUM[,icontrol] <- 0 SE <- sqrt(pmax(NUM, 0))/Deach[,icontrol] result <- list(estimate=risks, SE=SE) } } }) } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov if(length(uhoh)) attr(result, "warnings") <- uhoh return(result) } clamp01 <- function(x) { if(is.im(x)) return(eval.im(pmin(pmax(x, 0), 1))) return(pmin(pmax(x, 0), 1)) } badvalues <- function(x) { if(is.im(x)) x <- as.matrix(x) return(!(is.finite(x) | is.na(x))) } really <- function(x) { if(is.im(x)) x <- as.matrix(x) x[is.na(x)] <- FALSE return(x) } reciprocal <- function(x) 1/x divideifpositive <- function(z, d) { eval.im(ifelse(d > 0, z/d, NA)) } relrisk.ppp }) bw.stoyan <- function(X, co=0.15) { ## Stoyan's rule of thumb stopifnot(is.ppp(X)) n <- npoints(X) W <- Window(X) a <- area(W) stoyan <- co/sqrt(5 * max(1,n)/a) return(stoyan) } bw.relrisk <- function(X, ...) 
{ UseMethod("bw.relrisk") } bw.relrisk.ppp <- function(X, method="likelihood", ..., nh=spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE) { stopifnot(is.ppp(X)) stopifnot(is.multitype(X)) ## rearrange in ascending order of x-coordinate (for C code) X <- X[fave.order(X$x)] ## Y <- split(X) ntypes <- length(Y) if(ntypes == 1) stop("Data contains only one type of points") n <- npoints(X) marx <- marks(X) method <- pickoption("method", method, c(likelihood="likelihood", leastsquares="leastsquares", ls="leastsquares", LS="leastsquares", weightedleastsquares="weightedleastsquares", wls="weightedleastsquares", WLS="weightedleastsquares")) ## if(method != "likelihood") { ## dummy variables for each type imarks <- as.integer(marx) if(ntypes == 2) { ## 1 = control, 2 = case indic <- (imarks == 2) y01 <- as.integer(indic) } else { indic <- matrix(FALSE, n, ntypes) indic[cbind(seq_len(n), imarks)] <- TRUE y01 <- indic * 1 } X01 <- X %mark% y01 } ## cross-validated bandwidth selection ## determine a range of bandwidth values if(is.null(hmin) || is.null(hmax)) { W <- Window(X) a <- area(W) d <- diameter(as.rectangle(W)) ## Stoyan's rule of thumb applied to the least and most common types mcount <- table(marx) nmin <- max(1, min(mcount)) nmax <- max(1, max(mcount)) stoyan.low <- 0.15/sqrt(nmax/a) stoyan.high <- 0.15/sqrt(nmin/a) if(is.null(hmin)) hmin <- max(minnndist(unique(X)), stoyan.low/5) if(is.null(hmax)) { hmax <- min(d/4, stoyan.high * 20) hmax <- max(hmax, hmin * 2) } } else stopifnot(hmin < hmax) ## h <- geomseq(from=hmin, to=hmax, length.out=nh) cv <- numeric(nh) ## ## compute cross-validation criterion switch(method, likelihood={ methodname <- "Negative Likelihood" ## for efficiency, only compute the estimate of p_j(x_i) ## when j = m_i = mark of x_i. Dthis <- numeric(n) for(i in seq_len(nh)) { Dall <- density.ppp(X, sigma=h[i], at="points", edge=FALSE, sorted=TRUE, ...) Deach <- density.splitppp(Y, sigma=h[i], at="points", edge=FALSE, sorted=TRUE, ...) split(Dthis, marx) <- Deach pthis <- Dthis/Dall cv[i] <- -mean(log(pthis)) } }, leastsquares={ methodname <- "Least Squares" for(i in seq_len(nh)) { phat <- Smooth(X01, sigma=h[i], at="points", leaveoneout=TRUE, sorted=TRUE, ...) phat <- as.matrix(phat) cv[i] <- mean((y01 - phat)^2) } }, weightedleastsquares={ methodname <- "Weighted Least Squares" ## need initial value of h from least squares h0 <- bw.relrisk(X, "leastsquares", nh=ceiling(nh/4)) phat0 <- Smooth(X01, sigma=h0, at="points", leaveoneout=TRUE, sorted=TRUE, ...) phat0 <- as.matrix(phat0) var0 <- phat0 * (1-phat0) var0 <- pmax.int(var0, 1e-6) for(i in seq_len(nh)) { phat <- Smooth(X01, sigma=h[i], at="points", leaveoneout=TRUE, sorted=TRUE, ...) 
phat <- as.matrix(phat) cv[i] <- mean((y01 - phat)^2/var0) } }) ## optimize result <- bw.optim(cv, h, hname="sigma", creator="bw.relrisk", criterion=paste(methodname, "Cross-Validation"), warnextreme=warn, hargnames=c("hmin", "hmax"), unitname=unitname(X)) return(result) } which.max.im <- function(x) { .Deprecated("im.apply", "spatstat.geom", "which.max.im(x) is deprecated: use im.apply(x, which.max)") ans <- im.apply(x, which.max) return(ans) } spatstat.explore/R/plot.fasp.R0000644000176200001440000001334114611073310016022 0ustar liggesusers# # plot.fasp.R # # $Revision: 1.31 $ $Date: 2022/11/03 11:08:33 $ # plot.fasp <- function(x, formule=NULL, ..., subset=NULL, title=NULL, banner=TRUE, transpose=FALSE, samex=FALSE, samey=FALSE, mar.panel=NULL, outerlabels=TRUE, cex.outerlabels=1.25, legend=FALSE) { # plot dimensions which <- x$which if(transpose) which <- t(which) nrows <- nrow(which) ncols <- ncol(which) # Determine the overall title of the plot if(banner) { if(!is.null(title)) overall <- title else if(!is.null(x$title)) overall <- x$title else { if(prod(dim(which)) > 1) overall <- "Array of diagnostic functions" else overall <- "Diagnostic function" if(is.null(x$dataname)) overall <- paste(overall,".",sep="") else overall <- paste(overall," for ",x$dataname,".",sep="") } if(length(overall) > 1) overall <- paste(overall, collapse="\n") nlines <- if(!is.character(overall)) 1 else length(unlist(strsplit(overall, "\n"))) } # If no formula is given, look for a default formula in x: defaultplot <- is.null(formule) if(defaultplot && !is.null(x$default.formula)) formule <- x$default.formula if(!is.null(formule)) { # ensure formulae are given as character strings. formule <- FormatFaspFormulae(formule, "formule") # Number of formulae should match number of functions. nf <- length(formule) nfun <- length(x$fns) if(nf == 1 && nfun > 1) formule <- rep.int(formule, nfun) else if(nf != nfun) stop(paste("Wrong number of entries in", sQuote("formule"))) } # Check on the length of the subset argument. ns <- length(subset) if(ns > 1) { if(ns != length(x$fns)) stop("Wrong number of entries in subset argument.\n") msub <- TRUE } else msub <- FALSE # compute common x, y axis limits for all plots ? 
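  ## When samex or samey is TRUE, each panel is first evaluated with
  ## limitsonly=TRUE to obtain its natural axis limits; the union of these
  ## ranges is then imposed on every panel so that the panels share common axes.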
xlim <- ylim <- NULL if(samex || samey) { cat("Computing limits\n") # call plot.fv to determine plot limits of each panel for(i in 1:nrows) { for(j in 1:ncols) { k <- which[i,j] if(!is.na(k)) { fun <- as.fv(x$fns[[k]]) fmla <- if(!defaultplot) formule[k] else NULL sub <- if(msub) subset[[k]] else subset lims <- plot(fun, fmla, subset=sub, limitsonly=TRUE) # update the limits if(samex) xlim <- range(xlim, lims$xlim) if(samey) ylim <- range(ylim, lims$ylim) } } } } ############################################################# # Set up the plot layout n <- nrows * ncols # panels 1..n = plot panels codes <- matrix(seq_len(n), byrow=TRUE, ncol=ncols, nrow=nrows) heights <- rep.int(1, nrows) widths <- rep.int(1, ncols) # annotation as chosen if(outerlabels) { # column headings colhead.codes <- max(codes) + (1:ncols) colhead.height <- 0.2 codes <- rbind(colhead.codes, codes) heights <- c(colhead.height, heights) # row headings rowhead.codes <- max(codes) + (1:nrows) rowhead.width <- 0.2 codes <- cbind(c(0,rowhead.codes), codes) widths <- c(rowhead.width, widths) } if(banner) { # overall banner top.code <- max(codes) + 1 top.height <- 0.1 * (1+nlines) codes <- rbind(top.code, codes) heights <- c(top.height, heights) } # declare layout layout(codes, widths=widths, heights=heights) ############################################################ # Plot the function panels # # determine annotation colNames <- colnames(which) rowNames <- rownames(which) nrc <- max(nrows, ncols) ann.def <- par("ann") && (nrc <= 3) # determine margin around each panel if(is.null(mar.panel)) mar.panel <- if(nrc > 3 && outerlabels) rep.int(1/nrc, 4) else par("mar") opa <- par(mar=mar.panel, xpd=TRUE) on.exit(par(opa)) # # plot each function for(i in 1:nrows) { for(j in 1:ncols) { k <- which[i,j] if(is.na(k)) { plot(0,0,type='n',xlim=c(0,1), ylim=c(0,1),axes=FALSE,xlab='',ylab='', ...) 
} else { fun <- as.fv(x$fns[[k]]) fmla <- if(!defaultplot) formule[k] else NULL sub <- if(msub) subset[[k]] else subset main <- if(outerlabels) "" else if(nrows == 1) colNames[j] else if(ncols == 1) rowNames[i] else paren(paste(rowNames[i], colNames[j], sep=",")) do.call(plot, resolve.defaults(list(x=quote(fun), fmla=quote(fmla), subset=quote(sub)), list(...), list(xlim=xlim, ylim=ylim, main=main, legend=legend), list(ann=ann.def, axes=ann.def, frame.plot=TRUE))) } } } ############################################################ # # Annotation as selected if(outerlabels) { par(mar=rep.int(0,4), xpd=TRUE) # Plot the column headers for(j in 1:ncols) { plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) text(0,0,colNames[j], cex=cex.outerlabels) } # Plot the row labels for(i in 1:nrows) { plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) text(0,0,rowNames[i], srt=90, cex=cex.outerlabels) } } if(banner) { par(mar=rep.int(0,4), xpd=TRUE) # plot the banner plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) cex <- resolve.defaults(list(...), list(cex.title=2))$cex.title text(0,0, overall, cex=cex) } # revert layout(1) return(invisible(NULL)) } spatstat.explore/R/ssf.R0000644000176200001440000001626414611073310014716 0ustar liggesusers# # ssf.R # # spatially sampled functions # # $Revision: 1.22 $ $Date: 2022/05/23 02:33:06 $ # ssf <- function(loc, val) { stopifnot(is.ppp(loc)) if(is.function(val)) val <- val(loc$x, loc$y) if(is.data.frame(val)) val <- as.matrix(val) if(!is.matrix(val)) val <- matrix(val, ncol=1, dimnames=list(NULL, "value")) if(nrow(val) != npoints(loc)) stop("Incompatible lengths") result <- loc %mark% val class(result) <- c("ssf", class(result)) attr(result, "ok") <- complete.cases(val) return(result) } print.ssf <- function(x, ..., brief=FALSE) { if(brief) { splat("Spatial function sampled at", npoints(x), "locations") } else { splat("Spatially sampled function") cat("Locations:\n\t") print(unmark(x)) } val <- marks(x) if(!is.matrix(val)) { d <- 1 warning("Internal format error: val is not a matrix") } else d <- ncol(val) if(!brief) { type <- if(d == 1) "Scalar" else paste(d, "-vector", sep="") splat(type, "valued function") } if(d > 1 && !is.null(nama <- colnames(val))) splat("Component names:", commasep(sQuote(nama))) return(invisible(NULL)) } summary.ssf <- function(object, ...) { z <- NextMethod("summary") class(z) <- c("summary.ssf", class(z)) return(z) } print.summary.ssf <- function(x, ...) { splat("Spatially sampled function") cat("Locations:\n\t") NextMethod("print") } image.ssf <- function(x, ...) { do.call("plot", resolve.defaults(list(quote(x), how="smoothed"), list(...))) } as.im.ssf <- function(X, ...) nnmark(X, ...) as.function.ssf <- function(x, ...) { X <- x mX <- marks(X) switch(markformat(X), vector = { g <- function(x, y=NULL) { Y <- xy.coords(x,y)[c("x","y")] J <- nncross(Y, X, what="which") result <- mX[J] return(unname(result)) } }, dataframe = { g <- function(x, y=NULL) { Y <- xy.coords(x,y)[c("x","y")] J <- nncross(Y, X, what="which") result <- mX[J,,drop=FALSE] row.names(result) <- NULL return(result) } }, stop("Marks must be a vector or data.frame")) h <- funxy(g, Frame(X)) return(h) } plot.ssf <- function(x, ..., how=c("smoothed", "nearest", "points"), style = c("image", "contour", "imagecontour"), sigma=NULL, contourargs=list()) { xname <- short.deparse(substitute(x)) how <- match.arg(how) style <- match.arg(style) otherargs <- list(...) 
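  ## The argument 'how' selects the representation to be plotted:
  ##   "points"   - the raw sample locations, marked by their values
  ##   "nearest"  - nearest-neighbour interpolation onto a pixel image (nnmark)
  ##   "smoothed" - kernel-smoothed pixel image (Smooth.ssf)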
# convert to images y <- switch(how, points = as.ppp(x), nearest = nnmark(x), smoothed = Smooth(x, sigma=sigma) ) dont.complain.about(y) # points plot if(how == "points") { out <- do.call("plot", resolve.defaults(list(quote(y)), otherargs, list(main=xname))) if(is.null(out)) return(invisible(NULL)) return(out) } # image plot switch(style, image = { out <- do.call("plot", resolve.defaults(list(quote(y)), otherargs, list(main=xname))) }, contour = { xwin <- as.owin(x) dont.complain.about(xwin) do.call("plot", resolve.defaults(list(quote(xwin)), otherargs, list(main=xname))) do.call("contour", resolve.defaults(list(quote(y), add=TRUE), contourargs)) out <- NULL }, imagecontour = { out <- do.call("plot", resolve.defaults(list(quote(y)), otherargs, list(main=xname))) do.call("contour", resolve.defaults(list(quote(y), add=TRUE), contourargs)) }) return(invisible(out)) } contour.ssf <- function(x, ..., main, sigma=NULL) { if(missing(main)) main <- short.deparse(substitute(x)) y <- Smooth(x, sigma=sigma) contour(y, ..., main=main) return(invisible(NULL)) } Smooth.ssf <- function(X, ...) { stopifnot(inherits(X, "ssf")) ok <- attr(X, "ok") Y <- as.ppp(X)[ok] argh <- list(...) isnul <- as.logical(unlist(lapply(argh, is.null))) nonnularg <- argh[!isnul] sigma0 <- if(any(c("sigma", "varcov") %in% names(nonnularg))) NULL else 1.4 * max(nndist(X)) dont.complain.about(Y) Z <- do.call("Smooth.ppp", resolve.defaults(list(X = quote(Y)), list(...), list(sigma=sigma0), .MatchNull=FALSE)) # don't take NULL for an answer! return(Z) } "[.ssf" <- function(x, i, j, ..., drop) { loc <- unmark(x) val <- marks(x) ok <- attr(x, "ok") # if(!missing(j)) val <- val[, j, drop=FALSE] if(!missing(i)) { # use [.ppp to identify which points are retained locn <- loc %mark% seq_len(npoints(loc)) loci <- locn[i] loc <- unmark(loci) id <- marks(loci) # extract val <- val[id, , drop=FALSE] ok <- ok[id] } out <- loc %mark% val class(out) <- c("ssf", class(out)) attr(out, "ok") <- ok return(out) } as.ppp.ssf <- function(X, ...) { class(X) <- "ppp" attr(X, "ok") <- NULL return(X) } marks.ssf <- function(x, ...) { val <- x$marks if(is.null(dim(val))) val <- matrix(val, ncol=1) if(is.data.frame(val)) val <- as.matrix(val) return(val) } "marks<-.ssf" <- function(x, ..., value) { ssf(unmark(x), value) } unmark.ssf <- function(X) { unmark(as.ppp(X)) } with.ssf <- function(data, ...) { loc <- as.ppp(data) val <- marks(data) newval <- with(as.data.frame(val), ...) if(length(newval) == npoints(loc) || (is.matrix(newval) && nrow(newval) == npoints(loc))) return(ssf(loc, newval)) return(newval) } ## the following is NOT a method for 'apply' !! apply.ssf <- function(X, ...) { loc <- as.ppp(X) val <- marks(X) newval <- apply(val, ...) if(length(newval) == npoints(loc) || (is.matrix(newval) && nrow(newval) == npoints(loc))) return(ssf(loc, newval)) return(newval) } range.ssf <- function(x, ...) range(marks(x), ...) min.ssf <- function(x, ...) min(marks(x), ...) max.ssf <- function(x, ...) max(marks(x), ...) 
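## Illustrative usage sketch for 'ssf' objects (kept as comments so that it is
## not executed when this file is sourced; assumes the 'cells' dataset from
## spatstat.data is available):
##   f <- ssf(cells, function(x, y) { x + y })  # sample a function at the points of 'cells'
##   plot(f, how="smoothed")                    # kernel-smoothed image of the sampled values
##   integral(f)                                # mean sampled value multiplied by the window area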
integral.ssf <- function(f, domain=NULL, ..., weights=attr(f, "weights")) { if(!is.null(weights)) { check.nvector(weights, npoints(f), oneok=TRUE, vname="weights") if(length(weights) == 1) weights <- rep(weights, npoints(f)) } if(is.tess(domain)) { result <- sapply(tiles(domain), integral.ssf, f=f, weights=weights) if(length(dim(result)) > 1) result <- t(result) return(result) } if(!is.null(domain)) { ok <- inside.owin(f, w=domain) f <- f[ok,] if(!is.null(weights)) weights <- weights[ok] } y <- marks(f) if(is.null(weights)) { z <- if(!is.matrix(y)) mean(y, na.rm=TRUE) else colMeans(y, na.rm=TRUE) a <- area(Window(f)) } else { z <- if(!is.matrix(y)) weighted.mean(y, w=weights, na.rm=TRUE) else apply(y, 2, weighted.mean, w=weights, na.rm=TRUE) a <- sum(weights) } z[!is.finite(z)] <- 0 return(z * a) } spatstat.explore/R/alltypes.R0000644000176200001440000001617214611073307015764 0ustar liggesusers# # alltypes.R # # $Revision: 1.38 $ $Date: 2022/01/04 05:30:06 $ # # alltypes <- function(X, fun="K", ..., dataname=NULL,verb=FALSE,envelope=FALSE,reuse=TRUE) { # # Function 'alltypes' --- calculates a summary function for # each type, or each pair of types, in a multitype point pattern # if(is.ppp(X)) classname <- "ppp" else if(is.lpp(X)) classname <- "lpp" else stop("X should be a ppp or lpp object") if(is.null(dataname)) dataname <- short.deparse(substitute(X)) # -------------------------------------------------------------------- # First inspect marks if(!is.marked(X)) { nmarks <- 0 marklabels <- "" } else { if(!is.multitype(X)) stop("the marks must be a factor") # ensure type names are parseable (for mathematical labels) levels(marks(X)) <- make.parseable(levels(marks(X))) mks <- marks(X) ma <- levels(mks) nmarks <- length(ma) marklabels <- paste(ma) } # --------------------------------------------------------------------- # determine function name f.is.name <- is.name(substitute(fun)) fname <- if(f.is.name) paste(as.name(substitute(fun))) else if(is.character(fun)) fun else sQuote("fun") # --------------------------------------------------------------------- # determine function to be called if(is.function(fun)) { estimator <- fun } else if(is.character(fun)) { # First try matching one of the standard abbreviations K, G etc estimator <- getSumFun(fun, classname, (nmarks > 0), fatal=FALSE) if(is.null(estimator)) estimator <- get(fun, mode="function") } else stop(paste(sQuote("fun"), "should be a function or a character string")) # ------------------------------------------------------------------ # determine how the function shall be called. 
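# The number of type arguments expected by 'estimator' is detected from its
# formal arguments: 0 means an unmarked-style summary (applied separately to
# each component of split(X) when the pattern is multitype), 1 means a
# "dot"-type function called with i only, and 2 means a "cross"-type function
# called with both i and j.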
# indices.expected <- sum(c("i", "j") %in% names(formals(estimator))) apply.to.split <- (indices.expected == 0 && nmarks > 1) if(apply.to.split) ppsplit <- split(X) # -------------------------------------------------------------------- # determine array dimensions and margin labels witch <- if(nmarks == 0) matrix(1L, nrow=1L, ncol=1L, dimnames=list("","")) else if (nmarks == 1) matrix(1L, nrow=1L, ncol=1L, dimnames=list(marklabels, marklabels)) else if(indices.expected != 2) matrix(1L:nmarks, nrow=nmarks, ncol=1L, dimnames=list(marklabels, "")) else matrix(1L:(nmarks^2),ncol=nmarks,nrow=nmarks, byrow=TRUE, dimnames=list(marklabels, marklabels)) # ------------ start computing ------------------------------- # if computing envelopes, first generate simulated patterns # using undocumented feature of envelope() if(envelope && reuse) { L <- do.call(spatstat.explore::envelope, resolve.defaults( list(quote(X), fun=estimator), list(internal=list(eject="patterns")), list(...), switch(1L+indices.expected, NULL, list(i=ma[1L]), list(i=ma[1L], j=ma[2L]), NULL), list(verbose=verb))) intern <- attr(L, "internal") } else intern <- L <- NULL # compute function array and build up 'fasp' object fns <- list() k <- 0 maxerr.action <- if(verb) "warn" else "null" for(i in 1L:nrow(witch)) { Y <- if(apply.to.split) ppsplit[[i]] else X for(j in 1L:ncol(witch)) { if(verb) cat("i =",i,"j =",j,"\n") currentfv <- if(!envelope) switch(1L+indices.expected, estimator(Y, ...), estimator(Y, i=ma[i], ...), estimator(Y, i=ma[i], j=ma[j], ...)) else do.call(spatstat.explore::envelope, resolve.defaults( list(quote(Y), estimator), list(simulate=L, internal=intern), list(verbose=FALSE), list(...), list(Yname=dataname, silent=TRUE, maxerr.action=maxerr.action), switch(1L+indices.expected, NULL, list(i=ma[i]), list(i=ma[i], j=ma[j]), NULL))) k <- k+1 fns[[k]] <- as.fv(currentfv) } } einfo <- lapply(fns, attr, which="einfo") gaveup <- sapply(lapply(einfo, getElement, name="gaveup"), isTRUE) if(any(gaveup)) { ng <- sum(gaveup) warning(paste(ng, "out of", length(fns), "envelopes", ngettext(ng, "was", "were"), "not computed, due to errors in evaluating", "the summary functions for simulated patterns")) } # wrap up into 'fasp' object title <- paste(if(nmarks > 1) "array of " else NULL, if(envelope) "envelopes of " else NULL, fname, if(nmarks <= 1) " function " else " functions ", "for ", dataname, ".", sep="") rslt <- fasp(fns, which=witch, formulae=NULL, dataname=dataname, title=title, checkfv=FALSE) return(rslt) } # Lookup table for standard abbreviations of functions getSumFun <- local({ ftable <- rbind( data.frame(class="ppp", marked=FALSE, abbrev=c("F", "G", "J", "K", "L", "pcf"), full=c("Fest", "Gest", "Jest", "Kest", "Lest", "pcf"), stringsAsFactors=FALSE), data.frame(class="ppp", marked=TRUE, abbrev=c("F", "G", "J", "K", "L", "pcf"), full= c("Fest", "Gcross", "Jcross", "Kcross", "Lcross", "pcfcross"), stringsAsFactors=FALSE), data.frame(class="lpp", marked=FALSE, abbrev=c("K", "pcf"), full=c("linearK", "linearpcf"), stringsAsFactors=FALSE), data.frame(class="lpp", marked=TRUE, abbrev=c("K", "pcf"), full=c("linearKcross", "linearpcfcross"), stringsAsFactors=FALSE) ) getfun <- function(abbreviation, classname, ismarked, fatal=TRUE) { matches <- with(ftable, which(abbrev == abbreviation & class == classname & marked == ismarked)) if(length(matches) == 0) { if(!fatal) return(NULL) stop(paste("No match to function abbreviation", sQuote(abbreviation), "for class", sQuote(classname))) } if(length(matches) > 1) stop("Ambiguous function 
name") fullname <- ftable$full[matches] get(fullname, mode="function") } getfun }) spatstat.explore/R/sigtrace.R0000644000176200001440000001430414611073310015715 0ustar liggesusers# # sigtrace.R # # $Revision: 1.10 $ $Date: 2016/02/11 09:36:11 $ # # Significance traces # dclf.sigtrace <- function(X, ...) mctest.sigtrace(X, ..., exponent=2) mad.sigtrace <- function(X, ...) mctest.sigtrace(X, ..., exponent=Inf) mctest.sigtrace <- function(X, fun=Lest, ..., exponent=1, interpolate=FALSE, alpha=0.05, confint=TRUE, rmin=0) { check.1.real(exponent) explain.ifnot(exponent >= 0) if(missing(fun) && inherits(X, c("envelope", "hasenvelope"))) fun <- NULL Z <- envelopeProgressData(X, fun=fun, ..., rmin=rmin, exponent=exponent) R <- Z$R devdata <- Z$devdata devsim <- Z$devsim result <- mctestSigtraceEngine(R, devdata, devsim, interpolate=interpolate, confint=confint, alpha=alpha, exponent=exponent, unitname=unitname(X)) result <- hasenvelope(result, Z$envelope) # envelope may be NULL return(result) } mctestSigtraceEngine <- local({ mctestSigtraceEngine <- function(R, devdata, devsim, ..., interpolate=FALSE, confint=TRUE, alpha=0.05, exponent=2, unitname=NULL) { nsim <- ncol(devsim) if(!interpolate) { #' Monte Carlo p-value datarank <- apply(devdata < devsim, 1, sum) + apply(devdata == devsim, 1, sum)/2 + 1 pvalue <- datarank/(nsim+1) } else { #' interpolated p-value devs <- cbind(devdata, devsim) pvalue <- apply(devs, 1, rowwise.interp.tailprob) } if(!confint) { #' create fv object without confidence interval p <- fv(data.frame(R=R, pest=pvalue, alpha=alpha), argu="R", ylab = quote(p(R)), valu="pest", fmla = . ~ R, desc = c("Interval endpoint R", "calculated p-value %s", "threshold for significance"), labl=c("R", "%s(R)", paste(alpha)), unitname = unitname, fname = "p") fvnames(p, ".") <- c("pest", "alpha") } else { # confidence interval if(!interpolate) { #' Agresti-Coull confidence interval successes <- datarank - 1 trials <- nsim z <- qnorm(1 - (1-0.95)/2) nplus <- trials + z^2 pplus <- (successes + z^2/2)/nplus sigmaplus <- sqrt(pplus * (1-pplus)/nplus) lo <- pplus - z * sigmaplus hi <- pplus + z * sigmaplus } else { #' confidence interval by delta method pSE <- apply(devs, 1, rowwise.se) z <- qnorm(1 - (1-0.95)/2) lo <- pmax(0, pvalue - z * pSE) hi <- pmin(1, pvalue + z * pSE) } #' create fv object with confidence interval p <- fv(data.frame(R=R, pest=pvalue, alpha=alpha, lo=lo, hi=hi), argu="R", ylab = quote(p(R)), valu="pest", fmla = . 
~ R, desc = c("Interval endpoint R", "calculated p-value %s", "threshold for significance", "lower 95%% limit for p-value", "upper 95%% limit for p-value"), labl=c("R", "%s(R)", paste(alpha), "lo(R)", "hi(R)"), unitname = unitname, fname = "p") fvnames(p, ".") <- c("pest", "alpha", "lo", "hi") fvnames(p, ".s") <- c("lo", "hi") } return(p) } ## interpolated p-value interpol.tailprob <- function(x, q) { sigma <- bw.nrd0(x) mean(pnorm(q, mean=x, sd=sigma, lower.tail=FALSE)) } rowwise.interp.tailprob <- function(x) { interpol.tailprob(x[-1], x[1]) } ## estimated SE of p-value interpol.se <- function(x, q) { sigma <- bw.nrd0(x) z <- density(x, sigma) v <- mean(z$y * pnorm(q, mean=z$x, sd=sigma, lower.tail=FALSE)^2) * diff(range(z$x)) sqrt(v)/length(x) } rowwise.se <- function(x) { interpol.se(x[-1], x[1]) } mctestSigtraceEngine }) dg.sigtrace <- function(X, fun=Lest, ..., exponent=2, nsim=19, nsimsub=nsim-1, alternative=c("two.sided", "less", "greater"), rmin=0, leaveout=1, interpolate=FALSE, confint=TRUE, alpha=0.05, savefuns=FALSE, savepatterns=FALSE, verbose=FALSE) { alternative <- match.arg(alternative) env.here <- sys.frame(sys.nframe()) if(!missing(nsimsub) && !relatively.prime(nsim, nsimsub)) stop("nsim and nsimsub must be relatively prime") ## generate or extract simulated patterns and functions if(verbose) cat("Generating first-level data...") E <- envelope(X, fun=fun, ..., nsim=nsim, savepatterns=TRUE, savefuns=TRUE, verbose=verbose, envir.simul=env.here) ## get first level MC test significance trace if(verbose) cat("Computing significance trace...") T1 <- mctest.sigtrace(E, fun=fun, nsim=nsim, exponent=exponent, rmin=rmin, alternative=alternative, leaveout=leaveout, interpolate=interpolate, confint=FALSE, verbose=verbose, ...) R <- T1$R phat <- T1$pest ## second level traces if(verbose) cat(" Done.\nGenerating second-level data... [silently] ..") Pat <- attr(E, "simpatterns") T2list <- lapply(Pat, mctest.sigtrace, fun=fun, nsim=nsimsub, exponent=exponent, rmin=rmin, alternative=alternative, leaveout=leaveout, interpolate=interpolate, confint=FALSE, verbose=FALSE, ...) 
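  ## Dao-Genton adjustment: each simulated pattern yields its own significance
  ## trace. At each value of R, the adjusted p-value is the Monte Carlo rank of
  ## the observed first-level p-value among the simulated ones (negated values
  ## are passed below so that small p-values count as extreme).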
phati <- sapply(T2list, getElement, name="pest") ## Dao-Genton p-value if(verbose) cat(" Computing significance trace...") result <- mctestSigtraceEngine(R, -phat, -phati, interpolate=FALSE, confint=confint, exponent=exponent, alpha=alpha, unitname=unitname(X)) if(verbose) cat(" Done.\n") if(savefuns || savepatterns) result <- hasenvelope(result, E) return(result) } spatstat.explore/R/radcum.R0000644000176200001440000000307614611073310015373 0ustar liggesusers## ## radcum.R ## ## cumulative integral as function of distance ## ## $Revision: 1.1 $ $Date: 2022/07/16 03:29:09 $ radcumint <- function(X, ..., origin, Xname, result=c("fv", "im")) { if(missing(Xname)) Xname <- sensiblevarname(short.deparse(substitute(X)), "X") result <- match.arg(result) trap.extra.arguments(..., .Context="radcum") stopifnot(is.im(X)) if(!missing(origin) && !is.null(origin)) { X <- shift(X, origin=origin) backshift <- -getlastshift(X) } else { backshift <- NULL } #' determine discretisation steps rmax <- with(vertices(Frame(X)), sqrt(max(x^2+y^2))) pixarea <- with(X, xstep * ystep) eps <- with(X, sqrt(xstep^2 + ystep^2)) #' Xdata <- as.data.frame(X) values <- Xdata$value radii <- with(Xdata, sqrt(x^2+y^2)) #' rmax <- max(max(radii), rmax) + 2*eps rr <- seq(0, rmax, by=eps) wh <- whist(radii, breaks=rr, weights=pixarea * values) yy <- cumsum(c(0,wh)) #' df <- data.frame(r=rr, f=yy) FUN <- fv(df, argu="r", ylab=substitute(bar(X)(r), list(X=as.name(Xname))), valu="f", fmla=(. ~ r), alim=c(0, rmax), labl=c("r", "%s(r)"), desc=c("distance argument r", "rotational average"), unitname=unitname(X), fname=paste0("bar", paren(Xname))) fvnames(FUN, ".") <- "f" if(result == "fv") return(FUN) ## compute image FUN <- as.function(FUN) IM <- as.im(function(x,y,FUN){ FUN(sqrt(x^2+y^2)) }, X, FUN=FUN) if(!is.null(backshift)) IM <- shift(IM,backshift) return(IM) } spatstat.explore/R/GJfox.R0000644000176200001440000001047414611073307015143 0ustar liggesusers# # GJfox.R # # Foxall G-function and J-function # # $Revision: 1.13 $ $Date: 2022/05/17 07:48:43 $ # Gfox <- function(X, Y, r=NULL, breaks=NULL, correction=c("km", "rs", "han"), W=NULL, ...) 
{ stopifnot(is.ppp(X)) #' validate and resolve windows a <- resolve.foxall.window(X, Y, W) X <- a$X Y <- a$Y W <- a$W #' if(is.null(correction)) correction <- c("rs", "km", "cs") correction <- pickoption("correction", correction, c(none="none", raw="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", best="km"), multi=TRUE) corxtable <- c("km", "rs", "han", "none") corx <- as.list(corxtable %in% correction) names(corx) <- corxtable ## compute distances and censoring distances D <- distfun(Y) dist <- D(X) bdry <- bdist.points(X[W]) # sic ## histogram breakpoints dmax <- max(dist) breaks <- handle.r.b.args(r, breaks, Window(X), NULL, rmaxdefault=dmax) rval <- breaks$r ## censoring indicators d <- (dist <= bdry) ## observed distances o <- pmin.int(dist, bdry) ## calculate estimates Z <- censtimeCDFest(o, bdry, d, breaks, KM=corx$km, RS=corx$rs, HAN=corx$han, RAW=corx$none, han.denom=if(corx$han) eroded.areas(Window(X), rval) else NULL, tt=dist, fname=c("G", "fox"), fexpr=quote(G[fox](r)) ) ## relabel unitname(Z) <- unitname(Y) return(Z) } Jfox <- function(X, Y, r=NULL, breaks=NULL, correction=c("km", "rs", "han"), W=NULL, ..., warn.trim=TRUE) { ## validate and resolve windows a <- resolve.foxall.window(X, Y, W, isTRUE(warn.trim)) X <- a$X Y <- a$Y W <- a$W ## process H <- Hest(Y, r=r, breaks=breaks, correction=correction, ..., W=W) G <- Gfox(X, Y, r=H$r, correction=correction, ..., W=W) ## derive J-function J <- eval.fv((1-G)/(1-H), dotonly=FALSE) ## correct calculation of hazard is different if("hazard" %in% names(J)) J$hazard <- G$hazard - H$hazard ## base labels on 'J' rather than full expression attr(J, "labl") <- attr(G, "labl") ## add column of 1's J <- bind.fv(J, data.frame(theo=rep.int(1, nrow(J))), "{%s[%s]^{theo}}(r)", "theoretical value of %s for independence") ## rename J <- rebadge.fv(J, quote(J[fox](r)), c("J", "fox")) funs <- c("km", "han", "rs", "raw", "theo") fvnames(J, ".") <- funs[funs %in% names(J)] unitname(J) <- unitname(Y) attr(J, "conserve") <- attr(H, "conserve") return(J) } resolve.foxall.window <- function(X, Y, W=NULL, warn.trim=TRUE) { if(!(is.ppp(Y) || is.psp(Y) || is.owin(Y) || is.im(Y))) stop("Y should be an object of class ppp, psp, owin or im") if(is.im(Y) && !is.logical(ZeroValue(Y))) stop("When Y is an image, its pixel values should be logical values") if(!identical(unitname(X), unitname(Y))) warning("X and Y are not in the same units") ## default window based on Y if(is.ppp(Y) || is.psp(Y)) { W0 <- Window(Y) W0describe <- "the observation window of Y" } else if(is.owin(Y)) { W0 <- Frame(Y) W0describe <- "the Frame of Y" } else if(is.im(Y)) { W0 <- Window(Y) W0describe <- "the observation window of Y" Y <- solutionset(Y) } else stop("Y should be an object of class ppp, psp, owin or im") ## actual window used for estimation if(!is.null(W)) { stopifnot(is.owin(W)) if(!is.subset.owin(W, W0)) stop(paste("W is not a subset of", W0describe)) Wdescribe <- "W" } else { W <- W0 Wdescribe <- W0describe } ## ensure compatible windows WX <- Window(X) if(!is.subset.owin(WX, W)) { if(warn.trim) warning(paste("Trimming the window of X to be a subset of", Wdescribe)) WX <- intersect.owin(WX, W) if(area.owin(WX) == 0) stop("Trimmed window has zero area") X <- X[WX] if(npoints(X) == 0) stop("No points remaining after trimming window") } return(list(X=X, Y=Y, W=W)) } spatstat.explore/R/hotbox.R0000644000176200001440000000365214611073311015424 0ustar liggesusers#' #' hotbox.R #' #' Heat kernel for a one-dimensional rod #' and two-dimensional 
rectangle #' #' Code from Greg McSwiggan and Adrian Baddeley hotbox <- function(Xsource, Xquery, sigma, ..., W=NULL, squared=FALSE, nmax=20) { #' heat kernel in a rectangle check.1.real(sigma) if(is.null(W)) { if(is.ppp(Xsource)) W <- Window(Xsource) else if(is.sob(Xquery)) W <- Window(Xquery) else stop("No window information is present") } else { stopifnot(is.owin(W)) if(!is.sob(Xsource)) Xsource <- as.ppp(Xsource, W) if(!is.sob(Xquery)) Xquery <- as.ppp(Xquery, W) } if(!is.rectangle(W)) stop("The window must be a rectangle") slen <- sidelengths(W) Xsource <- shift(Xsource, origin="bottomleft") Xquery <- shift(Xquery, origin="bottomleft") nsource <- npoints(Xsource) if(is.ppp(Xquery)) { nquery <- npoints(Xquery) answer <- numeric(nquery) for(i in seq_len(nsource)) { cx <- hotrod(slen[1], Xsource$x[i], Xquery$x, sigma, ends="insulated", nmax=nmax) cy <- hotrod(slen[2], Xsource$y[i], Xquery$y, sigma, ends="insulated", nmax=nmax) contrib <- cx * cy if(squared) contrib <- contrib^2 answer <- answer + contrib } } else if(is.im(Xquery) || is.owin(Xquery)) { Xquery <- as.im(Xquery, ...) if(anyNA(Xquery)) stop("Image must be a full rectangle") ansmat <- matrix(0, nrow(Xquery), ncol(Xquery)) xx <- Xquery$xcol yy <- Xquery$yrow for(i in seq_len(nsource)) { cx <- hotrod(slen[1], Xsource$x[i], xx, sigma, ends="insulated", nmax=nmax) cy <- hotrod(slen[2], Xsource$y[i], yy, sigma, ends="insulated", nmax=nmax) contrib <- outer(cy, cx, "*") if(squared) contrib <- contrib^2 ansmat <- ansmat + contrib } answer <- Xquery answer[] <- ansmat } else stop("Unrecognised format for Xquery") return(answer) } spatstat.explore/R/pcf.R0000644000176200001440000002133314611073310014664 0ustar liggesusers#' #' pcf.R #' #' $Revision: 1.74 $ $Date: 2023/02/28 03:14:29 $ #' #' calculate pair correlation function from point pattern (pcf.ppp) #' pcf <- function(X, ...) { UseMethod("pcf") } pcf.ppp <- function(X, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor=c("r", "d"), var.approx=FALSE, domain=NULL, ratio=FALSE, close=NULL) { verifyclass(X, "ppp") # r.override <- !is.null(r) win <- Window(X) areaW <- area(win) npts <- npoints(X) samplesize <- npairs <- npts * (npts - 1) lambda <- npts/areaW lambda2area <- npairs/areaW kernel <- match.kernel(kernel) rmaxdefault <- rmax.rule("K", win, lambda) if(!is.null(domain)) { # estimate based on contributions from a subdomain domain <- as.owin(domain) if(!is.subset.owin(domain, win)) stop(paste(dQuote("domain"), "is not a subset of the window of X")) # trick pcfdot() into doing it indom <- inside.owin(X$x, X$y, domain) marx <- factor(indom, levels=c(FALSE,TRUE)) g <- pcfdot(X %mark% marx, i="TRUE", r=r, correction=correction, kernel=kernel, bw=bw, stoyan=stoyan, divisor=divisor, ...) 
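    ## The subdomain estimate is obtained by marking each point by whether it
    ## lies inside 'domain' and calling pcfdot(..., i="TRUE"), so that only
    ## pairs whose first point lies in the subdomain contribute.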
if(!ratio) { ## relabel g <- rebadge.fv(g, quote(g(r)), "g") } else { ## construct ratfv object ninside <- sum(indom) samplesize <- ninside * (npts-1) g <- ratfv(as.data.frame(g), NULL, samplesize, "r", quote(g(r)), "theo", NULL, c(0, rmaxdefault), attr(g, "labl"), attr(g, "desc"), fname="g", ratio=TRUE) } unitname(g) <- unitname(X) if(var.approx) warning("var.approx is not implemented when 'domain' is given") return(g) } correction.given <- !missing(correction) correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", good="translate", best="best", none="none"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) divisor <- match.arg(divisor) # bandwidth if(is.null(bw) && (kernel == "epanechnikov")) { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(lambda) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(lambda) } ########## r values ############################ # handle arguments r and breaks breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax), .StripNull = TRUE) ################################################# # compute pairwise distances if(npts > 1) { needall <- any(correction %in% c("translate", "isotropic")) if(is.null(close)) { what <- if(needall) "all" else "ijd" close <- closepairs(X, rmax + hmax, what=what) } else { #' check 'close' has correct format needed <- if(!needall) c("i", "j", "d") else c("i", "j", "xi", "yi", "xj", "yj", "dx", "dy", "d") if(any(is.na(match(needed, names(close))))) stop(paste("Argument", sQuote("close"), "should have components named", commasep(sQuote(needed))), call.=FALSE) } dIJ <- close$d } else { undefined <- rep(NaN, length(r)) } # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) out <- ratfv(df, NULL, samplesize, "r", quote(g(r)), "theo", NULL, alim, c("r","%s[Pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="g", ratio=ratio) ###### compute ####### bw.used <- NULL if(any(correction=="none")) { #' uncorrected if(npts > 1) { kdenN <- sewpcf(dIJ, 1, denargs, lambda2area, divisor) gN <- kdenN$g bw.used <- attr(kdenN, "bw") } else gN <- undefined out <- bind.ratfv(out, quotient=data.frame(un=gN), denominator=samplesize, labl="hat(%s)[un](r)", desc="uncorrected estimate of %s", preferred="un", ratio=ratio) } if(any(correction=="translate")) { # translation correction if(npts > 1) { edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=win, paired=TRUE) kdenT <- sewpcf(dIJ, edgewt, denargs, lambda2area, divisor) gT <- kdenT$g bw.used <- attr(kdenT, "bw") } else gT <- undefined out <- bind.ratfv(out, quotient=data.frame(trans=gT), denominator=samplesize, labl="hat(%s)[Trans](r)", desc="translation-corrected estimate of %s", preferred="trans", ratio=ratio) } if(any(correction=="isotropic")) { # Ripley isotropic correction if(npts > 1) { XI <- ppp(close$xi, close$yi, window=win, check=FALSE) edgewt <- edge.Ripley(XI, 
matrix(dIJ, ncol=1)) kdenR <- sewpcf(dIJ, edgewt, denargs, lambda2area, divisor) gR <- kdenR$g bw.used <- attr(kdenR, "bw") } else gR <- undefined out <- bind.ratfv(out, quotient=data.frame(iso=gR), denominator=samplesize, labl="hat(%s)[Ripley](r)", desc="isotropic-corrected estimate of %s", preferred="iso", ratio=ratio) } # sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } ## variance approximation ## Illian et al 2008 p 234 equation 4.3.42 if(var.approx) { gr <- if(any(correction == "isotropic")) gR else gT # integral of squared kernel intk2 <- kernel.squint(kernel, bw.used) # isotropised set covariance of window gWbar <- as.function(rotmean(setcov(win), result="fv")) vest <- gr * intk2/(pi * r * gWbar(r) * lambda^2) out <- bind.ratfv(out, quotient=data.frame(v=vest), denominator=samplesize, labl="v(r)", desc="approximate variance of %s", ratio=ratio) } ## Finish off ## default is to display all corrections formula(out) <- . ~ r fvnames(out, ".") <- setdiff(rev(colnames(out)), c("r", "v")) ## unitname(out) <- unitname(X) ## copy to other components if(ratio) out <- conform.ratfv(out) attr(out, "bw") <- bw.used return(out) } # Smoothing Estimate of Weighted Pair Correlation # d = vector of relevant distances # w = vector of edge correction weights (in normal use) # denargs = arguments to density.default # lambda2area = constant lambda^2 * areaW (in normal use) sewpcf <- function(d, w, denargs, lambda2area, divisor=c("r","d")) { divisor <- match.arg(divisor) nw <- length(w) if(nw != length(d) && nw != 1) stop("Internal error: incorrect length of weights vector in sewpcf") if(divisor == "d") { w <- w/d if(!all(good <- is.finite(w))) { nbad <- sum(!good) warning(paste(nbad, "infinite, NA or NaN", ngettext(nbad, "contribution was", "contributions were"), "deleted from pcf estimate with divisor='d'.", "Fraction deleted: ", paste0(round(100 * nbad/length(w), 2), "%")), call.=FALSE) d <- d[good] w <- w[good] } nw <- length(w) } kden <- unnormdensity(x=d, weights=w, defaults=denargs) r <- kden$x y <- kden$y if(divisor == "r") y <- y/r g <- y/(2 * pi * lambda2area) result <- data.frame(r=r,g=g) attr(result, "bw") <- kden$bw return(result) } spatstat.explore/R/rhohat.R0000644000176200001440000010202014611073310015372 0ustar liggesusers#' #' rhohat.R #' #' $Revision: 1.116 $ $Date: 2023/08/14 06:33:10 $ #' #' Non-parametric estimation of a function rho(z) determining #' the intensity function lambda(u) of a point process in terms of a #' spatial covariate Z(u) through lambda(u) = rho(Z(u)). #' More generally allows offsets etc. #' Copyright (c) Adrian Baddeley 2015-2022 #' GNU Public Licence GPL >= 2.0 rhohat <- function(object, covariate, ...) 
{ UseMethod("rhohat") } rhohat.ppp <- rhohat.quad <- function(object, covariate, ..., baseline=NULL, weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "mountain", "valley", "piecewise"), subset=NULL, do.CI=TRUE, jitter=TRUE, jitterfactor=1, interpolate=TRUE, dimyx=NULL, eps=NULL, rule.eps = c("adjust.eps", "grow.frame", "shrink.frame"), n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95, positiveCI, breaks=NULL) { callstring <- short.deparse(sys.call()) smoother <- match.arg(smoother) method <- match.arg(method) rule.eps <- match.arg(rule.eps) X <- if(is.ppp(object)) object else object$data if(is.marked(X) && !is.multitype(X)) { warning(paste("rhohat does not handle marked point pattern data", "unless the marks are categorical (factor) values;", "marks were ignored"), call.=FALSE) X <- unmark(X) } if(missing(positiveCI)) positiveCI <- (smoother == "local") if(missing(covname)) covname <- sensiblevarname(short.deparse(substitute(covariate)), "X") if(is.null(adjust)) adjust <- 1 ## Determine reference model (and validate arguments) if(is.null(baseline)) { ## Uniform intensity ## WAS: model <- ppm(object ~1, subset=subset) model <- exactppm(X, subset=subset) reference <- "Lebesgue" } else { ## Intensity proportional to baseline ## WAS: model <- ppm(object ~ offset(log(baseline)), subset=subset) model <- exactppm(X, baseline=baseline, subset=subset, eps=eps, dimyx=dimyx, rule.eps=rule.eps) reference <- "baseline" } modelcall <- NULL if(is.character(covariate) && length(covariate) == 1) { covname <- covariate switch(covname, x={ covariate <- function(x,y) { x } }, y={ covariate <- function(x,y) { y } }, stop("Unrecognised covariate name") ) covunits <- unitname(X) } else if(inherits(covariate, "distfun")) { covunits <- unitname(covariate) } else { covunits <- NULL } W <- Window(X) if(!is.null(subset)) W <- W[subset, drop=FALSE] areaW <- area(W) rhohatEngine(model, covariate, reference, areaW, ..., subset=subset, do.CI=do.CI, weights=weights, method=method, horvitz=horvitz, smoother=smoother, resolution=list(dimyx=dimyx, eps=eps, rule.eps=rule.eps), spatCovarArgs=list(clip.predict=FALSE, jitter=jitter, jitterfactor=jitterfactor, interpolate=interpolate), n=n, bw=bw, adjust=adjust, from=from, to=to, bwref=bwref, covname=covname, covunits=covunits, confidence=confidence, positiveCI=positiveCI, breaks=breaks, modelcall=modelcall, callstring=callstring) } #' Code for rhohat.ppm is moved to spatstat.model rhohatEngine <- function(model, covariate, reference=c("Lebesgue", "model", "baseline"), volume, ..., subset=NULL, weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "mountain", "valley", "piecewise"), resolution=list(), spatCovarArgs=list(), n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, covunits=NULL, confidence=0.95, breaks=NULL, modelcall=NULL, callstring="rhohat") { reference <- match.arg(reference) #' evaluate the covariate at data points and at pixels stuff <- do.call(spatialCovariateEvidence, c(list(model=model, covariate=covariate, subset=subset), resolution, spatCovarArgs)) # unpack values <- stuff$values # values at each data point ZX <- values$ZX lambdaX <- values$lambdaX # values at each pixel Zimage <- values$Zimage lambdaimage <- values$lambdaimage # could be multiple images # values at each pixel (for .ppp, .ppm) or quadrature point (for .lpp, .lppm) Zvalues <- 
values$Zvalues lambda <- values$lambda ## weights if(!is.null(weights)) { X <- as.ppp(stuff$X) if(is.im(weights)) weights <- safelookup(weights, X) else if(is.function(weights)) weights <- weights(X$x, X$y) else if(is.numeric(weights) && is.vector(as.numeric(weights))) check.nvector(weights, npoints(X), vname="weights") else stop(paste(sQuote("weights"), "should be a vector, a pixel image, or a function")) } # normalising constants denom <- volume * (if(reference == "Lebesgue" || horvitz) 1 else mean(lambda)) # info savestuff <- list(reference = reference, horvitz = horvitz, Zimage = Zimage, lambdaimage = lambdaimage) # calculate rho-hat result <- rhohatCalc(ZX, Zvalues, lambda, denom, ..., weights=weights, lambdaX=lambdaX, method=method, horvitz=horvitz, smoother=smoother, n=n, bw=bw, adjust=adjust, from=from, to=to, bwref=bwref, covname=covname, confidence=confidence, breaks=breaks, covunits=covunits, modelcall=modelcall, callstring=callstring, savestuff=savestuff) return(result) } # basic calculation of rhohat from covariate values rhohatCalc <- local({ interpolate <- function(x,y) { if(inherits(x, "density") && missing(y)) approxfun(x$x, x$y, rule=2) else approxfun(x, y, rule=2) } ## note: this function normalises the weights, like density.default LocfitRaw <- function(x, ..., weights=NULL) { if(is.null(weights)) weights <- 1 requireNamespace("locfit", quietly=TRUE) do.call.matched(locfit::locfit.raw, append(list(x=x, weights=weights), list(...))) } varlog <- function(obj,xx) { ## variance of log f-hat stopifnot(inherits(obj, "locfit")) if(!identical(obj$trans, exp)) stop("internal error: locfit object does not have log link") ## the following call should have band="local" but that produces NaN's pred <- predict(obj, newdata=xx, se.fit=TRUE, what="coef") se <- pred$se.fit return(se^2) } unimodalLogLikelihood <- function(zcrit, allargs) { rhofun <- unimodalEstimate(zcrit, allargs) ZX <- allargs$ZX return(sum(log(pmax(.Machine$double.eps, rhofun(ZX))))) } unimodalEstimate <- function(zcrit, allargs) { #' unpack ZX <- allargs$ZX weights <- allargs$weights areas <- allargs$areas totalarea <- allargs$totalarea G <- allargs$G inverted <- allargs$inverted #' split according to zcrit left <- (ZX <= zcrit) right <- !left zleft <- ZX[left] zright <- ZX[right] weightsleft <- weights[left] weightsright <- weights[right] areacrit <- G(zcrit) * totalarea arealeft <- areas[left] arearight <- areas[right] #' left side of critical point yleft <- monoCalc(weightsleft, arealeft, areacrit, increasing=!inverted) #' right side of critical point yright <- monoCalc(weightsright, arearight - areacrit, totalarea - areacrit, increasing=inverted) #' value at critical point ycrit <- if(inverted) min(yleft, yright) else max(yleft, yright) yinf <- if(inverted) max(yright) else 0 #' build function zz <- c(zleft, zright) yy <- c(yleft, yright, yinf) lambda <- stepfun(x = zz, y = yy, right=TRUE, f=1) return(lambda) } monoCalc <- function(weights, areas, totarea, increasing) { if(length(weights) == 0) return(numeric(0)) if(increasing) areas <- totarea - rev(areas) ## maximum upper sets algorithm y <- numeric(0) a <- weights b <- diff(c(0, areas)) while(length(b) > 0) { u <- cumsum(a)/cumsum(b) if(any(bad <- !is.finite(u))) # divide by zero etc u[bad] <- max(u[!bad], 0) k <- which.max(u) y <- c(y, rep(u[k], k)) a <- a[-(1:k)] b <- b[-(1:k)] } if(increasing) y <- rev(y) return(y) } ## ............... main function ...................... 
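  ## rhohatCalc computes the estimate of rho from the covariate values.
  ## Hedged illustrative sketch (not executed; 'lo', 'hi' are placeholders
  ## for the range limits): for smoother="kernel" and method="ratio" the
  ## estimate is essentially
  ##     rho(z) = fhat(z) / (denom * ghat(z))
  ## where fhat is an (unnormalised) kernel density estimate of the covariate
  ## values ZX at the data points, ghat is the lambda-weighted reference
  ## density of Zvalues, and denom is the normalising constant supplied by
  ## rhohatEngine, roughly
  ##     dZX  <- density(ZX, from=lo, to=hi)
  ##     dref <- density(Zvalues, weights=lambda/sum(lambda), from=lo, to=hi)
  ##     rho  <- approxfun(dZX$x, length(ZX) * dZX$y / (denom * dref$y), rule=2)
  ## The full code below also handles weights, variance approximations,
  ## confidence bands, and the other choices of smoother and method.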
rhohatCalc <- function(ZX, Zvalues, lambda, denom, ..., weights=NULL, lambdaX, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "mountain", "valley", "piecewise"), do.CI=TRUE, n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95, breaks=NULL, positiveCI=(smoother == "local"), markovCI=TRUE, covunits = NULL, modelcall=NULL, callstring=NULL, savestuff=list()) { method <- match.arg(method) smoother <- match.arg(smoother) ## check availability of locfit package if(smoother == "local" && !requireNamespace("locfit", quietly=TRUE)) { warning(paste("In", paste0(dQuote(callstring), ":"), "package", sQuote("locfit"), "is not available;", "unable to perform local likelihood smoothing;", "using kernel smoothing instead"), call.=FALSE) smoother <- "kernel" } ## validate stopifnot(is.numeric(ZX)) stopifnot(is.numeric(Zvalues)) stopifnot(is.numeric(lambda)) stopifnot(length(lambda) == length(Zvalues)) stopifnot(all(is.finite(lambda))) check.1.real(denom) ## if(!do.CI) vvv <- NULL ## if(horvitz) { ## data points will be weighted by reciprocal of model intensity weights <- (weights %orifnull% 1)/lambdaX } ## normalising constants nX <- if(is.null(weights)) length(ZX) else sum(weights) kappahat <- nX/denom ## limits Zrange <- range(ZX, Zvalues) if(is.null(from)) from <- Zrange[1] if(is.null(to)) to <- Zrange[2] if(from > Zrange[1] || to < Zrange[2]) stop(paste("In", paste0(dQuote(callstring), ":"), "interval [from, to] =", prange(c(from,to)), "does not contain the range of data values =", prange(Zrange)), call.=FALSE) ## critical constant for CI's crit <- qnorm((1+confidence)/2) percentage <- paste(round(100 * confidence), "%%", sep="") CIblurb <- paste("pointwise", percentage, "confidence interval") ## estimate densities switch(smoother, kernel = { ## ............... kernel smoothing ...................... ## reference density (normalised) for calculation ghat <- density(Zvalues,weights=if(horvitz) NULL else lambda/sum(lambda), bw=bwref,adjust=adjust,n=n,from=from,to=to, ..., warnWbw=FALSE) xxx <- ghat$x ghatfun <- interpolate(ghat) ## relative density switch(method, ratio={ ## compute ratio of smoothed densities fhat <- unnormdensity(ZX,weights=weights, bw=bw,adjust=adjust, n=n,from=from, to=to, ...) fhatfun <- interpolate(fhat) Ghat.xxx <- denom * ghatfun(xxx) yyy <- fhatfun(xxx)/Ghat.xxx if(do.CI) { ## compute variance approximation sigma <- fhat$bw weights2 <- if(is.null(weights)) NULL else weights^2 fstar <- unnormdensity(ZX,weights=weights2, bw=bw,adjust=adjust/sqrt(2), n=n,from=from, to=to, ...) fstarfun <- interpolate(fstar) const <- 1/(2 * sigma * sqrt(pi)) vvv <- const * fstarfun(xxx)/Ghat.xxx^2 } }, reweight={ ## weight Z values by reciprocal of reference wt <- (weights %orifnull% 1)/(denom * ghatfun(ZX)) rhat <- unnormdensity(ZX, weights=wt, bw=bw,adjust=adjust, n=n,from=from, to=to, ...) rhatfun <- interpolate(rhat) yyy <- rhatfun(xxx) if(do.CI) { ## compute variance approximation sigma <- rhat$bw rongstar <- unnormdensity(ZX, weights=wt^2, bw=bw,adjust=adjust/sqrt(2), n=n,from=from, to=to, ...) rongstarfun <- interpolate(rongstar) const <- 1/(2 * sigma * sqrt(pi)) vvv <- const * rongstarfun(xxx) } }, transform={ ## probability integral transform Gfun <- interpolate(ghat$x, cumsum(ghat$y)/sum(ghat$y)) GZX <- Gfun(ZX) ## smooth density on [0,1] qhat <- unnormdensity(GZX,weights=weights, bw=bw,adjust=adjust, n=n, from=0, to=1, ...) 
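                 ## Under the reference model the transformed values GZX are
                 ## approximately uniform on [0,1]; dividing their estimated
                 ## density by an edge-corrected estimate of the uniform
                 ## density (computed below with the same bandwidth) removes
                 ## the boundary bias at 0 and 1.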
qhatfun <- interpolate(qhat) ## edge effect correction one <- density(seq(from=0,to=1,length.out=512), bw=qhat$bw, adjust=1, n=n,from=0, to=1, ...) onefun <- interpolate(one) ## apply to transformed values Gxxx <- Gfun(xxx) Dxxx <- denom * onefun(Gxxx) yyy <- qhatfun(Gxxx)/Dxxx if(do.CI) { ## compute variance approximation sigma <- qhat$bw weights2 <- if(is.null(weights)) NULL else weights^2 qstar <- unnormdensity(GZX,weights=weights2, bw=bw,adjust=adjust/sqrt(2), n=n,from=0, to=1, ...) qstarfun <- interpolate(qstar) const <- 1/(2 * sigma * sqrt(pi)) vvv <- const * qstarfun(Gxxx)/Dxxx^2 } }) if(do.CI) { vvvname <- "Variance of estimator" vvvlabel <- paste("bold(Var)~hat(%s)", paren(covname), sep="") sd <- sqrt(vvv) if(!positiveCI) { hi <- yyy + crit * sd lo <- yyy - crit * sd } else { sdlog <- ifelse(yyy > 0, sd/yyy, 0) sss <- exp(crit * sdlog) hi <- yyy * sss lo <- yyy / sss if(markovCI) { ## truncate extremely large confidence intervals ## using Markov's Inequality hi <- pmin(hi, yyy/(1-confidence)) } } } }, local = { ## .................. local likelihood smoothing ....................... xlim <- c(from, to) xxx <- seq(from, to, length=n) ## reference density ghat <- LocfitRaw(Zvalues, weights=if(horvitz) NULL else lambda, xlim=xlim, ...) ggg <- predict(ghat, xxx) ## relative density switch(method, ratio={ ## compute ratio of smoothed densities fhat <- LocfitRaw(ZX, weights=weights, xlim=xlim, ...) fff <- predict(fhat, xxx) yyy <- kappahat * fff/ggg if(do.CI) { ## compute approximation to variance of log rho-hat varlogN <- 1/nX vvv <- varlog(fhat, xxx) + varlogN } }, reweight={ ## weight Z values by reciprocal of reference wt <- (weights %orifnull% 1)/(denom * predict(ghat,ZX)) sumwt <- sum(wt) rhat <- LocfitRaw(ZX, weights=wt, xlim=xlim, ...) rrr <- predict(rhat, xxx) yyy <- sumwt * rrr if(do.CI) { ## compute approximation to variance of log rho-hat varsumwt <- mean(yyy /(denom * ggg)) * diff(xlim) varlogsumwt <- varsumwt/sumwt^2 vvv <- varlog(rhat, xxx) + varlogsumwt } }, transform={ ## probability integral transform Gfun <- approxfun(xxx, cumsum(ggg)/sum(ggg), rule=2) GZX <- Gfun(ZX) ## smooth density on [0,1], end effect corrected qhat <- LocfitRaw(GZX, weights=weights, xlim=c(0,1), ...) ## apply to transformed values Gxxx <- Gfun(xxx) qqq <- predict(qhat, Gxxx) yyy <- kappahat * qqq if(do.CI) { ## compute approximation to variance of log rho-hat varlogN <- 1/nX vvv <- varlog(qhat, Gxxx) + varlogN } }) if(do.CI) { vvvname <- "Variance of log of estimator" vvvlabel <- paste("bold(Var)~log(hat(%s)", paren(covname), ")", sep="") sdlog <- sqrt(vvv) if(positiveCI) { sss <- exp(crit * sdlog) hi <- yyy * sss lo <- yyy / sss if(markovCI) { ## truncate extremely large confidence intervals ## using Markov's Inequality hi <- pmin(hi, yyy/(1-confidence)) } } else { hi <- yyy * (1 + crit * sdlog) lo <- yyy * (1 - crit * sdlog) } } }, increasing = , decreasing = { ## ................. monotone .................................... ## .................. nonparametric maximum likelihood ............ 
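        #' Hedged worked example of the 'maximum upper sets' step below,
        #' using assumed toy values: weights = c(2,1,3) and area increments
        #' diff(c(0, areas)) = c(1,2,1).
        #' First pass: u = cumsum(c(2,1,3))/cumsum(c(1,2,1)) = c(2, 1, 1.5);
        #' the maximum is u[1] = 2, so rho = 2 on the first step.
        #' Second pass on the remainder: u = c(1/2, 4/3), maximum at the last
        #' position, so rho = 4/3 on the next two steps.
        #' The result c(2, 4/3, 4/3), padded with a final 0, is non-increasing,
        #' as required when smoother = "decreasing".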
if(is.null(weights)) weights <- rep(1, length(ZX)) if(method != "ratio") warning(paste("Argument method =", sQuote(method), "is ignored when smoother =", sQuote(smoother))) #' observed (sorted) oX <- order(ZX) ZX <- ZX[oX] weights <- weights[oX] #' reference CDF G <- ewcdf(Zvalues, lambda) #' reference denominator ('area') at each observed value if(smoother == "decreasing") { areas <- denom * G(ZX) } else { areas <- denom * (1 - G(rev(ZX))) weights <- rev(weights) } #' maximum upper sets algorithm rho <- numeric(0) darea <- diff(c(0, areas)) dcount <- weights while(length(darea) > 0) { u <- cumsum(dcount)/cumsum(darea) if(any(bad <- !is.finite(u))) # divide by zero etc u[bad] <- max(u[!bad], 0) k <- which.max(u) rho <- c(rho, rep(u[k], k)) darea <- darea[-(1:k)] dcount <- dcount[-(1:k)] } rho <- c(rho, 0) if(smoother == "increasing") rho <- rev(rho) #' compute as a stepfun rhofun <- stepfun(x = ZX, y=rho, right=TRUE, f=1) #' evaluate on a grid xlim <- c(from, to) xxx <- seq(from, to, length=n) yyy <- rhofun(xxx) #' vvv <- hi <- lo <- NULL savestuff$rhofun <- rhofun }, mountain = , valley = { ## ................. unimodal .................................... ## .................. nonparametric maximum likelihood ............ if(is.null(weights)) weights <- rep(1, length(ZX)) if(method != "ratio") warning(paste("Argument method =", sQuote(method), "is ignored when smoother =", sQuote(smoother))) #' observed (sorted) oX <- order(ZX) ZX <- ZX[oX] weights <- weights[oX] #' collapse duplicates if(anyDuplicated(ZX)) { islast <- rev(c(TRUE, diff(rev(ZX)) != 0)) ZX <- ZX[islast] weights <- diff(c(0, cumsum(weights)[islast])) } #' reference CDF G <- ewcdf(Zvalues, lambda) #' reference denominator ('area') at each observed value areas <- denom * G(ZX) totalarea <- denom #' bundle all data allargs <- list(ZX = ZX, weights = weights, areas = areas, totalarea = totalarea, G = G, inverted = (smoother == "valley")) #' optimize position of peak v <- optimise(unimodalLogLikelihood, range(ZX), maximum=TRUE, allargs=allargs) zcrit <- as.numeric(v$maximum) #' form stepfun rhofun <- unimodalEstimate(zcrit, allargs) #' evaluate on a grid xlim <- c(from, to) xxx <- seq(from, to, length=n) yyy <- rhofun(xxx) #' No variances vvv <- hi <- lo <- NULL #' save info savestuff$rhofun <- rhofun savestuff$zcrit <- zcrit }, piecewise = { ## .................. piecewise constant ............ 
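        #' The piecewise-constant estimate is the count (or sum of weights)
        #' of data points in each band of covariate values, divided by the
        #' lambda-weighted measure of that band.
        #' Hedged illustrative call (assuming a point pattern X):
        #'    rhoP <- rhohat(X, "x", smoother="piecewise")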
if(is.null(breaks)) { breaks <- pretty(c(from, to)) } else { stopifnot(is.numeric(breaks)) breaks <- exactCutBreaks(c(from, to), breaks) } if(method != "ratio") { warning(paste("Argument method =", sQuote(method), "is not implemented when smoother = 'piecewise';", "replaced by method = 'ratio'")) method <- "ratio" } ## convert numerical covariate values to factor cutZvalues <- cut(Zvalues, breaks=breaks) cutZX <- cut(ZX, breaks=breaks) ## denominator areas <- denom * tapplysum(lambda, list(cutZvalues))/sum(lambda) ## numerator counts <- if(is.null(weights)) { as.numeric(table(cutZX)) } else { tapplysum(weights, list(cutZX)) } ## estimate of rho(z) for each band of z values rhovals <- counts/areas #' convert to a stepfun rhofun <- stepfun(x = breaks, y=c(0, rhovals, 0)) #' evaluate on a grid xlim <- c(from, to) xxx <- seq(from, to, length=n) yyy <- rhofun(xxx) if(do.CI) { #' variance vvvname <- "Variance of estimator" vvvlabel <- paste("bold(Var)~hat(%s)", paren(covname), sep="") varnum <- if(is.null(weights)) counts else tapplysum(weights^2, list(cutZX)) varvals <- varnum/areas^2 varfun <- stepfun(x = breaks, y=c(0, varvals, 0)) vvv <- varfun(xxx) sd <- sqrt(vvv) #' confidence bands if(!positiveCI) { hi <- yyy + crit * sd lo <- yyy - crit * sd } else { sdlog <- ifelse(yyy > 0, sd/yyy, 0) sss <- exp(crit * sdlog) hi <- yyy * sss lo <- yyy / sss if(markovCI) { ## truncate extremely large confidence intervals ## using Markov's Inequality hi <- pmin(hi, yyy/(1-confidence)) } } } ## pack up savestuff$rhofun <- rhofun savestuff$breaks <- breaks }) ## pack into fv object df <- data.frame(xxx=xxx, rho=yyy, ave=kappahat) names(df)[1] <- covname desc <- c(paste("Covariate", covname), "Estimated intensity", "Average intensity") parencov <- paren(covname) labl <- c(covname, paste0("hat(%s)", parencov), "bar(%s)") if(did.variance <- !is.null(vvv)) { df <- cbind(df, data.frame(var=vvv, hi=hi, lo=lo)) desc <- c(desc, vvvname, paste("Upper limit of", CIblurb), paste("Lower limit of", CIblurb)) labl <- c(labl, vvvlabel, paste0("%s[hi]", parencov), paste0("%s[lo]", parencov)) } rslt <- fv(df, argu=covname, ylab=substitute(rho(X), list(X=as.name(covname))), valu="rho", fmla= as.formula(paste(". ~ ", covname)), alim=c(from, to), labl=labl, desc=desc, unitname=covunits, fname="rho", yexp=substitute(rho(X), list(X=as.name(covname)))) if(did.variance) { fvnames(rslt, ".") <- c("rho", "ave", "hi", "lo") fvnames(rslt, ".s") <- c("hi", "lo") } else fvnames(rslt, ".") <- c("rho", "ave") ## pack up class(rslt) <- c("rhohat", class(rslt)) ## add info stuff <- list(modelcall = modelcall, callstring = callstring, sigma = switch(smoother, kernel=sigma, local=NULL), covname = paste(covname, collapse=""), ZX = ZX, lambda = lambda, method = method, smoother = smoother, confidence = confidence, positiveCI = positiveCI) attr(rslt, "stuff") <- append(stuff, savestuff) return(rslt) } rhohatCalc }) ## ........... end of 'rhohatCalc' ................................. print.rhohat <- function(x, ...) 
{ s <- attr(x, "stuff") smoother <- s$smoother method <- s$method splat("Intensity function estimate (class rhohat)", "for the covariate", s$covname) switch(s$reference, Lebesgue=splat("Function values are absolute intensities"), baseline=splat("Function values are relative to baseline"), model={ splat("Function values are relative to fitted model") print(s$modelcall) }) cat("Type of estimate: ") switch(smoother, kernel = , local = splat("Smooth function of covariate"), increasing = splat("Increasing function of covariate"), decreasing = splat("Decreasing function of covariate"), mountain = splat("Unimodal (mountain) function of covariate"), valley = splat("Inverted unimodal (valley) function of covariate"), piecewise = splat("Piecewise-constant function of covariate"), splat("unknown smoother =", sQuote(smoother)) ) cat("Estimation method: ") switch(smoother, piecewise = splat("average intensity in sub-regions"), increasing = , decreasing = splat("nonparametric maximum likelihood"), mountain = , valley = { splat("nonparametric maximum likelihood") if(!is.null(zcrit <- s$zcrit)) { with(summary(unitname(x)), splat("Critical z value =", signif(zcrit, 4), plural, explain)) } }, kernel = { switch(method, ratio = splat("ratio of fixed-bandwidth kernel smoothers"), reweight={ splat("fixed-bandwidth kernel smoother of weighted data") }, transform={ splat("probability integral transform,", "edge-corrected fixed bandwidth kernel smoothing", "on [0,1]") }, splat("Unknown method =", sQuote(s$method))) if(isTRUE(s$horvitz)) splat("\twith Horvitz-Thompson weight") splat("\tActual smoothing bandwidth sigma = ", signif(s$sigma,5)) }, local = { switch(method, ratio = splat("ratio of local likelihood smoothers"), reweight={ splat("local likelihood smoother of weighted data") }, transform={ splat("probability integral transform followed by", "local likelihood smoothing on [0,1]") }, splat("Unknown method =", sQuote(s$method))) if(isTRUE(s$horvitz)) splat("\twith Horvitz-Thompson weight") }) if(all(c("hi", "lo") %in% names(x))) { positiveCI <- s$positiveCI %orifnull% (smoother == "local") confidence <- s$confidence %orifnull% 0.95 splat("Pointwise", paste0(100 * confidence, "%"), "confidence bands for rho(x)\n\t based on asymptotic variance of", if(positiveCI) "log(rhohat(x))" else "rhohat(x)") } splat("Call:", s$callstring) cat("\n") NextMethod("print") } plot.rhohat <- function(x, ..., do.rug=TRUE) { xname <- short.deparse(substitute(x)) force(x) s <- attr(x, "stuff") covname <- s$covname asked.rug <- !missing(do.rug) && identical(rug, TRUE) snam <- intersect(c("hi", "lo"), names(x)) if(length(snam) == 0) snam <- NULL out <- do.call(plot.fv, resolve.defaults(list(x=quote(x)), list(...), list(main=xname, shade=snam))) if(identical(list(...)$limitsonly, TRUE)) return(out) if(do.rug) { rugx <- ZX <- s$ZX # check whether it's the default plot argh <- list(...) 
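    # if a plot formula was supplied in '...', the rug values must be
    # transformed in the same way as the x axis of the plotted curve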
isfo <- unlist(lapply(argh, inherits, what="formula")) if(any(isfo)) { # a plot formula was given; inspect RHS fmla <- argh[[min(which(isfo))]] rhs <- rhs.of.formula(fmla) vars <- variablesinformula(rhs) vars <- vars[vars %in% c(colnames(x), ".x", ".y")] if(length(vars) == 1 && vars %in% c(covname, ".x")) { # expression in terms of covariate rhstr <- as.character(rhs)[2] dat <- list(ZX) names(dat) <- vars[1] rugx <- as.numeric(eval(parse(text=rhstr), dat)) } else { if(asked.rug) warning("Unable to add rug plot") rugx <- NULL } } if(!is.null(rugx)) { # restrict to x limits, if given if(!is.null(xlim <- list(...)$xlim)) rugx <- rugx[rugx >= xlim[1] & rugx <= xlim[2]] # finally plot the rug if(length(rugx) > 0) rug(rugx) } } invisible(NULL) } predict.rhohat <- local({ predict.rhohat <- function(object, ..., relative=FALSE, what=c("rho", "lo", "hi", "se")) { trap.extra.arguments(..., .Context="in predict.rhohat") what <- match.arg(what) #' extract info s <- attr(object, "stuff") reference <- s$reference #' check availability if((what %in% c("lo", "hi", "se")) && !("hi" %in% names(object))) stop("Standard error and confidence bands are not available in this object", call.=FALSE) #' convert to (linearly interpolated) function x <- with(object, .x) y <- if(what == "se") sqrt(object[["var"]]) else object[[what]] fun <- approxfun(x, y, rule=2) #' extract image(s) of covariate Z <- s$Zimage #' apply fun to Z Y <- if(is.im(Z)) evalfun(Z, fun) else solapply(Z, evalfun, f=fun) if(reference != "Lebesgue" && !relative) { #' adjust to reference baseline Lam <- s$lambdaimage # could be an image or a list of images #' multiply Y * Lam (dispatch on 'Math' is not yet working) netted <- is.linim(Y) || (is.solist(Y) && all(sapply(Y, is.linim))) netted <- netted && requireNamespace("spatstat.linnet") if(!netted) { Y <- imagelistOp(Lam, Y, "*") } else { if(is.solist(Y)) Y <- as.linimlist(Y) Y <- spatstat.linnet::LinimListOp(Lam, Y, "*") } } return(Y) } evalfun <- function(X, f) { force(f) force(X) if(is.linim(X) && requireNamespace("spatstat.linnet")) return(spatstat.linnet::eval.linim(f(X))) if(is.im(X)) return(eval.im(f(X))) return(NULL) } predict.rhohat }) as.function.rhohat <- function(x, ..., value=".y", extrapolate=TRUE) { NextMethod("as.function") } simulate.rhohat <- function(object, nsim=1, ..., drop=TRUE) { trap.extra.arguments(..., .Context="in simulate.rhohat") lambda <- predict(object) if(is.linim(lambda) || (is.solist(lambda) && all(sapply(lambda, is.linim)))) { if(!requireNamespace("spatstat.linnet")) { warning(paste("Cannot generate simulations on a network;", "this requires the package 'spatstat.linnet'"), call.=FALSE) return(NULL) } result <- spatstat.linnet::rpoislpp(lambda, nsim=nsim, drop=drop) } else { result <- rpoispp(lambda, nsim=nsim, drop=drop) } return(result) } spatstat.explore/R/nnclean.R0000644000176200001440000002051414611073310015532 0ustar liggesusers# # nnclean.R # # Nearest-neighbour clutter removal # # Adapted from statlib file NNclean.q # Authors: Simon Byers and Adrian Raftery # # $Revision: 1.20 $ $Date: 2020/12/19 05:25:06 $ # nnclean <- function(X, k, ...) 
{ UseMethod("nnclean") } nnclean.pp3 <- function(X, k, ..., convergence = 0.001, plothist = FALSE, verbose=TRUE, maxit=50) { # Adapted from statlib file NNclean.q # Authors: Simon Byers and Adrian Raftery # Adapted for spatstat by Adrian Baddeley Xname <- short.deparse(substitute(X)) stopifnot(inherits(X, "pp3")) validposint(k, "nnclean.pp3") kthNND <- nndist(X, k=k) dont.complain.about(kthNND) # apply classification algorithm em <- do.call(nncleanEngine, resolve.defaults(list(quote(kthNND), k=k), list(...), list(d=3, tol=convergence, plothist=plothist, verbose=verbose, maxit=maxit, Xname=Xname))) # tack results onto point pattern as marks pp <- em$probs zz <- factor(em$z, levels=c(0,1)) levels(zz) <- c("noise", "feature") mm <- hyperframe(prob=pp, label=zz) marks(X) <- cbind(marks(X), mm) attr(X, "theta") <- em[c("lambda1", "lambda2", "p")] attr(X, "info") <- em[c("d", "niter", "maxit", "converged")] attr(X, "hist") <- em$hist return(X) } nnclean.ppp <- function(X, k, ..., edge.correct = FALSE, wrap = 0.1, convergence = 0.001, plothist = FALSE, verbose=TRUE, maxit=50) { # Adapted from statlib file NNclean.q # Authors: Simon Byers and Adrian Raftery # Adapted for spatstat by Adrian Baddeley Xname <- short.deparse(substitute(X)) validposint(k, "nnclean.ppp") if(!edge.correct) { # compute vector of k-th nearest neighbour distances kthNND <- nndist(X, k=k) } else { # replicate data periodically # (ensuring original points are listed first) Xbox <- X[as.rectangle(X)] Xpand <- periodify(Xbox, ix=c(0,-1,1), iy=c(0,-1,1), check=FALSE) # trim to margin W <- expand.owin(X$window, (1+2*wrap)^2) Xpand <- Xpand[W] kthNND <- nndist(Xpand, k=k) } dont.complain.about(kthNND) # apply classification algorithm em <- do.call(nncleanEngine, resolve.defaults(list(quote(kthNND), k=k), list(...), list(d=2, tol=convergence, plothist=plothist, verbose=verbose, maxit=maxit, Xname=Xname))) # extract results pp <- em$probs zz <- em$z zz <- factor(zz, levels=c(0,1)) levels(zz) <- c("noise", "feature") df <- data.frame(class=zz,prob=pp) if(edge.correct) { # trim back to original point pattern df <- df[seq_len(X$n), ] } # tack on marx <- marks(X, dfok=TRUE) if(is.null(marx)) marks(X, dfok=TRUE) <- df else marks(X, dfok=TRUE) <- cbind(df, marx) attr(X, "theta") <- em[c("lambda1", "lambda2", "p")] attr(X, "info") <- em[c("d", "niter", "maxit", "converged")] attr(X, "hist") <- em$hist return(X) } nncleanEngine <- function(kthNND, k, d, ..., tol = 0.001, maxit = 50, plothist = FALSE, lineargs = list(), verbose=TRUE, Xname="X") { ## Adapted from statlib file NNclean.q ## Authors: Simon Byers and Adrian Raftery ## Adapted for spatstat by Adrian Baddeley n <- length(kthNND) ## Error handler by Adrian if(k >= n) { if(verbose) cat(paste("Cannot compute neighbours of order k =", k, "for a pattern of", n, "data points;", "treating all points as noise"), call.=FALSE) return(list(z = rep(0, n), probs = rep(0, n), lambda1 = NA, lambda2 = NA, p = 0, kthNND = kthNND, d=d, n=n, k=k, niter = 0, maxit = maxit, converged = TRUE, hist=NULL)) } ## Undocumented extension by Adrian Baddeley 2014 ## Allow different dimensions in feature and noise. ## d[1] is cluster dimension. d <- ensure2vector(d) alpha.d <- (2. * pi^(d/2.))/(d * gamma(d/2.)) # raise to power d for efficiency kNNDpowd1 <- kthNND^(d[1]) kNNDpowd2 <- kthNND^(d[2]) # # Now use kthNND in E-M algorithm # First set up starting guesses. # # probs <- numeric(n) thresh <- (min(kthNND) + diff(range(kthNND))/3.) 
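  # initial split of the k-th nearest neighbour distances at 'thresh':
  # small distances are provisionally treated as 'feature' (cluster) and
  # large distances as 'noise', providing starting values of lambda1 and
  # lambda2 for the EM iteration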
high <- (kthNND > thresh) delta <- as.integer(high) p <- 0.5 lambda1 <- k/(alpha.d[1] * mean(kNNDpowd1[!high])) lambda2 <- k/(alpha.d[2] * mean(kNNDpowd2[ high])) loglik.old <- 0. loglik.new <- 1. # # Iterator starts here, # Z <- !kthNND niter <- 0 while(abs(loglik.new - loglik.old)/(1 + abs(loglik.new)) > tol) { if(niter >= maxit) { warning(paste("E-M algorithm failed to converge in", maxit, ngettext(maxit, "iteration", "iterations")), call.=FALSE) break } niter <- niter + 1 # E - step f1 <- dknn(kthNND[!Z], lambda=lambda1, k = k, d = d[1]) f2 <- dknn(kthNND[!Z], lambda=lambda2, k = k, d = d[2]) delta[!Z] <- (p * f1)/(p * f1 + (1 - p) * f2) delta[Z] <- 0 # M - step sumdelta <- sum(delta) negdelta <- 1. - delta p <- sumdelta/n lambda1 <- (k * sumdelta)/(alpha.d[1] * sum(kNNDpowd1 * delta)) lambda2 <- (k * (n - sumdelta))/(alpha.d[2] * sum(kNNDpowd2 * negdelta)) # evaluate marginal loglikelihood loglik.old <- loglik.new loglik.new <- sum( - p * lambda1 * alpha.d[1] * (kNNDpowd1 * delta) - (1. - p) * lambda2 * alpha.d[2] * (kNNDpowd2 * negdelta) + delta * k * log(lambda1 * alpha.d[1]) + negdelta * k * log(lambda2 * alpha.d[2])) if(verbose) cat(paste("Iteration", niter, "\tlogLik =", loglik.new, "\tp =", signif(p,4), "\n")) } if(plothist) { dotargs <- list(...) if(spatstat.options('monochrome')) dotargs <- col.args.to.grey(dotargs) ## compute plot limits to include both histogram and density xlim <- c(0, max(kthNND)) H <- do.call(hist, resolve.defaults(list(quote(kthNND), plot=FALSE, warn.unused=FALSE), dotargs, list(nclass=40))) barheights <- H$density support <- seq(from=xlim[1], to=xlim[2], length.out = 200) fittedy <- p * dknn(support, lambda=lambda1, k = k, d = d[1]) + (1 - p) * dknn(support, lambda=lambda2, k = k, d = d[2]) ylim <- range(c(0, barheights, fittedy)) xlab <- paste("Distance to", ordinal(k), "nearest neighbour") ## now plot it (unless overridden by plot=FALSE) reallyplot <- resolve.1.default("plot", list(...), list(plot=TRUE)) H <- do.call(hist, resolve.defaults(list(quote(kthNND), probability=TRUE), dotargs, list(plot=TRUE, warn.unused=reallyplot, nclass=40, xlim = xlim, ylim=ylim, xlab = xlab, ylab = "Probability density", axes = TRUE, main=""))) H$xname <- xlab if(reallyplot) { box() lineargs <- resolve.defaults(lineargs, list(col="green", lwd=2)) if(spatstat.options("monochrome")) lineargs <- col.args.to.grey(lineargs) do.call(lines, append(list(x=support, y=fittedy), lineargs)) } } # delta1 <- dknn(kthNND[!Z], lambda=lambda1, k = k, d = d[1]) delta2 <- dknn(kthNND[!Z], lambda=lambda2, k = k, d = d[2]) probs[!Z] <- delta1/(delta1 + delta2) probs[Z] <- 1 # if(verbose) { cat("Estimated parameters:\n") cat(paste("p [cluster] =", signif(p, 5), "\n")) cat(paste("lambda [cluster] =", signif(lambda1, 5), "\n")) cat(paste("lambda [noise] =", signif(lambda2, 5), "\n")) } # # z will be the classifications. 1= in cluster. 0= in noise. 
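  # probs[i] compares the fitted 'feature' and 'noise' densities at the
  # i-th distance (omitting the mixing weight p); z = round(probs)
  # classifies each point by thresholding this ratio at 0.5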
# return(list(z = round(probs), probs = probs, lambda1 = lambda1, lambda2 = lambda2, p = p, kthNND = kthNND, d=d, n=n, k=k, niter = niter, maxit = maxit, converged = (niter >= maxit), hist=if(plothist) H else NULL)) } spatstat.explore/R/Jest.R0000644000176200001440000000476014611073307015034 0ustar liggesusers# Jest.S # # Usual invocation to compute J function # if F and G are not required # # $Revision: 4.27 $ $Date: 2023/12/07 10:42:49 $ # # # Jest <- function(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) { X <- as.ppp(X) W <- Window(X) brks <- handle.r.b.args(r, breaks, window=W, pixeps=eps, rmaxdefault=rmax.rule("J", W, intensity(X))) checkspacing <- !isFALSE(list(...)$checkspacing) #' compute F and G FF <- Fest(X, eps=eps, breaks=brks, correction=correction, checkspacing=checkspacing) G <- Gest(X, breaks=brks, correction=correction) # initialise fv object rvals <- FF$r rmax <- max(rvals) Z <- fv(data.frame(r=rvals, theo=1), "r", substitute(J(r), NULL), "theo", . ~ r, c(0,rmax), c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="J") # compute J function estimates # this has to be done manually because of the mismatch between names Fnames <- names(FF) Gnames <- names(G) bothnames <- intersect(Fnames, Gnames) if("raw" %in% bothnames) { Jun <- ratiotweak(1-G$raw, 1-FF$raw) Z <- bind.fv(Z, data.frame(un=Jun), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") attr(Z, "alim") <- range(rvals[FF$raw <= 0.9]) } if("rs" %in% bothnames) { Jrs <- ratiotweak(1-G$rs, 1-FF$rs) Z <- bind.fv(Z, data.frame(rs=Jrs), "hat(%s)[rs](r)", "border corrected estimate of %s", "rs") attr(Z, "alim") <- range(rvals[FF$rs <= 0.9]) } if("han" %in% Gnames && "cs" %in% Fnames) { Jhan <- ratiotweak(1-G$han, 1-FF$cs) Z <- bind.fv(Z, data.frame(han=Jhan), "hat(%s)[han](r)", "Hanisch-style estimate of %s", "han") attr(Z, "alim") <- range(rvals[FF$cs <= 0.9]) } if("km" %in% bothnames) { Jkm <- ratiotweak(1-G$km, 1-FF$km) Z <- bind.fv(Z, data.frame(km=Jkm), "hat(%s)[km](r)", "Kaplan-Meier estimate of %s", "km") attr(Z, "alim") <- range(rvals[FF$km <= 0.9]) } if("hazard" %in% bothnames) { Jhaz <- G$hazard - FF$hazard Z <- bind.fv(Z, data.frame(hazard=Jhaz), "hazard(r)", "Kaplan-Meier estimate of derivative of log(%s)") } # set default plotting values and order nama <- names(Z) fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))]) # add more info attr(Z, "F") <- FF attr(Z, "G") <- G attr(Z, "conserve") <- attr(FF, "conserve") unitname(Z) <- unitname(X) return(Z) } spatstat.explore/R/resolve.lambda.R0000644000176200001440000003477114611073310017024 0ustar liggesusers#' resolve.lambda.R #' #' Common code to evaluate the intensity 'lambda' in Kinhom, pcfinhom #' (and multitype counterparts) #' #' resolve.lambda #' resolve.lambdacross #' resolve.reciplambda #' validate.weights #' updateData (generic) (soon to be deprecated) #' #' $Revision: 1.21 $ $Date: 2024/01/10 10:58:02 $ resolve.lambda <- function(X, lambda=NULL, ...) 
{ UseMethod("resolve.lambda") } resolve.lambda.ppp <- function(X, lambda=NULL, ..., sigma=NULL, varcov=NULL, leaveoneout=TRUE, update=TRUE, check=TRUE) { dangerous <- "lambda" danger <- TRUE if(npoints(X) == 0) { danger <- FALSE lambda <- numeric(0) } else if(is.null(lambda)) { ## No intensity data provided ## Estimate density by leave-one-out kernel smoothing lambda <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) lambda <- as.numeric(lambda) if(check) validate.weights(lambda, how="density estimation") danger <- FALSE } else if(is.im(lambda)) { lambda <- safelookup(lambda, X) } else if(is.function(lambda)) { lambda <- lambda(X$x, X$y) } else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) { check.nvector(lambda, npoints(X), vname="lambda") } else if(inherits(lambda, c("ppm", "kppm", "dppm", "slrm"))) { if(!requireNamespace("spatstat.model")) stop("The package spatstat.model is required when 'lambda' is a fitted model", call.=FALSE) ## model provides intensity model <- lambda if(update) { model <- update(model, X) danger <- FALSE } if(inherits(model, "slrm")) { #' predict.slrm has different syntax, #' and does not support leave-one-out prediction lambda <- predict(model)[X] } else { lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } } else stop(paste(sQuote("lambda"), "should be a vector, a pixel image,", "a fitted model, or a function")) if(check) validate.weights(lambda) return(list(lambda=lambda, danger=danger, dangerous=if(danger) dangerous else NULL)) } resolve.reciplambda <- function(X, lambda=NULL, reciplambda=NULL, ...) { UseMethod("resolve.reciplambda") } resolve.reciplambda.ppp <- function(X, lambda=NULL, reciplambda=NULL, ..., sigma=NULL, varcov=NULL, leaveoneout=TRUE, update=TRUE, check=TRUE) { dangerous <- c("lambda", "reciplambda") danger <- TRUE if(npoints(X) == 0) { danger <- FALSE lambda <- reciplambda <- numeric(0) } else if(is.null(lambda) && is.null(reciplambda)) { ## No intensity data provided danger <- FALSE ## Estimate density by leave-one-out kernel smoothing lambda <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) lambda <- as.numeric(lambda) if(check) validate.weights(lambda, how="density estimation") reciplambda <- 1/lambda } else if(!is.null(reciplambda)) { ## 1/lambda values provided lambda <- NULL if(is.im(reciplambda)) { reciplambda <- safelookup(reciplambda, X) if(check) validate.weights(reciplambda, recip=TRUE, how="image lookup") } else if(is.function(reciplambda)) { reciplambda <- reciplambda(X$x, X$y) if(check) validate.weights(reciplambda, recip=TRUE, how="function evaluation") } else if(is.numeric(reciplambda) && is.vector(as.numeric(reciplambda))) { check.nvector(reciplambda, npoints(X), vname="reciplambda") if(check) validate.weights(reciplambda, recip=TRUE) } else stop(paste(sQuote("reciplambda"), "should be a vector, a pixel image, or a function")) } else { #' lambda values provided if(is.im(lambda)) { lambda <- safelookup(lambda, X) if(check) validate.weights(lambda, how="image lookup") } else if(is.function(lambda)) { lambda <- lambda(X$x, X$y) if(check) validate.weights(lambda, how="function evaluation") } else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) { check.nvector(lambda, npoints(X), vname="lambda") if(check) validate.weights(lambda) } else if(inherits(lambda, c("ppm", "kppm", "dppm", "slrm"))) { if(!requireNamespace("spatstat.model")) stop("The package spatstat.model is required when 'lambda' is a fitted model", call.=FALSE) ## model 
provides intensity model <- lambda if(update) { force(X) env.here <- sys.frame(sys.nframe()) model <- update(model, X, envir=env.here) danger <- FALSE } if(inherits(model, "slrm")) { #' predict.slrm has different syntax, #' and does not support leave-one-out prediction lambda <- predict(model)[X] } else { lambda <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) } if(check) validate.weights(lambda, how="model prediction") } else stop(paste(sQuote("lambda"), "should be a vector, a pixel image,", "a fitted model, or a function")) ## evaluate reciprocal reciplambda <- 1/lambda } return(list(lambda=lambda, reciplambda=reciplambda, danger=danger, dangerous=if(danger) dangerous else NULL)) } resolve.lambdacross <- function(X, I, J, lambdaI=NULL, lambdaJ=NULL, ...) { UseMethod("resolve.lambdacross") } resolve.lambdacross.ppp <- function(X, I, J, lambdaI=NULL, lambdaJ=NULL, ..., lambdaX=NULL, sigma=NULL, varcov=NULL, leaveoneout=TRUE, update=TRUE, lambdaIJ=NULL, Iexplain="points satisfying condition I", Jexplain="points satisfying condition J") { dangerous <- c("lambdaI", "lambdaJ") dangerI <- dangerJ <- TRUE XI <- X[I] XJ <- X[J] nI <- npoints(XI) nJ <- npoints(XJ) lamIname <- short.deparse(substitute(lambdaI)) lamJname <- short.deparse(substitute(lambdaJ)) bothnames <- c(lamIname, lamJname) givenI <- !is.null(lambdaI) givenJ <- !is.null(lambdaJ) givenX <- !is.null(lambdaX) if(givenI != givenJ) { givenone <- bothnames[c(givenI, givenJ)] missedone <- setdiff(bothnames, givenone) stop(paste("If", givenone, "is given, then", missedone, "should also be given"), call.=FALSE) } if(givenX && givenI && givenJ) warning(paste(paste(sQuote(bothnames), collapse=" and "), "were ignored because", sQuote("lambdaX"), "was given"), call.=FALSE) if(givenX) { ## Intensity values for all points of X if(is.im(lambdaX)) { ## Look up intensity values lambdaI <- safelookup(lambdaX, XI) lambdaJ <- safelookup(lambdaX, XJ) } else if(is.imlist(lambdaX) && is.multitype(X) && length(lambdaX) == length(levels(marks(X)))) { ## Look up intensity values Y <- split(X) lamY <- mapply("[", x=lambdaX, i=Y, SIMPLIFY=FALSE) lamX <- unsplit(lamY, marks(X)) lambdaI <- lamX[I] lambdaJ <- lamX[J] } else if(is.function(lambdaX)) { ## evaluate function at locations if(!is.marked(X) || length(formals(lambdaX)) == 2) { lambdaI <- lambdaX(XI$x, XI$y) lambdaJ <- lambdaX(XJ$x, XJ$y) } else { lambdaI <- lambdaX(XI$x, XI$y, marks(XI)) lambdaJ <- lambdaX(XJ$x, XJ$y, marks(XJ)) } } else if(is.numeric(lambdaX) && is.vector(as.numeric(lambdaX))) { ## vector of intensity values if(length(lambdaX) != npoints(X)) stop(paste("The length of", sQuote("lambdaX"), "should equal the number of points of X")) lambdaI <- lambdaX[I] lambdaJ <- lambdaX[J] } else if(inherits(lambdaX, c("ppm", "kppm", "dppm", "slrm"))) { if(!requireNamespace("spatstat.model")) stop("The package spatstat.model is required when 'lambdaX' is a fitted model", call.=FALSE) ## point process model provides intensity model <- lambdaX if(update) { force(X) env.here <- sys.frame(sys.nframe()) model <- update(model, X, envir=env.here) dangerI <- dangerJ <- FALSE dangerous <- "lambdaIJ" } if(inherits(model, "slrm")) { #' predict.slrm has different syntax, #' and does not support leave-one-out prediction Lambda <- predict(model) lambdaI <- Lambda[XI] lambdaJ <- Lambda[XJ] } else { ## re-fit model to data X lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) lambdaI <- lambdaX[I] lambdaJ <- lambdaX[J] } } else stop(paste("Argument lambdaX is not understood:", "it should be a 
numeric vector,", "an image, a function(x,y)", "or a fitted point process model (ppm, kppm or dppm)")) } else { ## lambdaI, lambdaJ expected if(!givenI) { ## estimate intensity dangerI <- FALSE dangerous <- setdiff(dangerous, "lambdaI") lambdaI <- density(XI, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) } else if(is.im(lambdaI)) { ## look up intensity values lambdaI <- safelookup(lambdaI, XI) } else if(is.function(lambdaI)) { ## evaluate function at locations lambdaI <- lambdaI(XI$x, XI$y) } else if(is.numeric(lambdaI) && is.vector(as.numeric(lambdaI))) { ## validate intensity vector check.nvector(lambdaI, nI, things=Iexplain, vname="lambdaI") } else if(inherits(lambdaI, c("ppm", "kppm", "dppm", "slrm"))) { if(!requireNamespace("spatstat.model")) stop("The package spatstat.model is required when 'lambdaI' is a fitted model", call.=FALSE) ## point process model provides intensity model <- lambdaI if(update) { force(X) env.here <- sys.frame(sys.nframe()) model <- update(model, X, envir=env.here) dangerI <- FALSE dangerous <- setdiff(dangerous, "lambdaI") } if(inherits(model, "slrm")) { #' predict.slrm has different syntax, #' and does not support leave-one-out prediction lambdaI <- predict(model)[XI] } else { lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) lambdaI <- lambdaX[I] } } else stop(paste(sQuote("lambdaI"), "should be a vector or an image")) if(!givenJ) { ## estimate intensity dangerJ <- FALSE dangerous <- setdiff(dangerous, "lambdaJ") lambdaJ <- density(XJ, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=leaveoneout) } else if(is.im(lambdaJ)) { ## look up intensity values lambdaJ <- safelookup(lambdaJ, XJ) } else if(is.function(lambdaJ)) { ## evaluate function at locations lambdaJ <- lambdaJ(XJ$x, XJ$y) } else if(is.numeric(lambdaJ) && is.vector(as.numeric(lambdaJ))) { ## validate intensity vector check.nvector(lambdaJ, nJ, things=Jexplain, vname="lambdaJ") } else if(inherits(lambdaJ, c("ppm", "kppm", "dppm", "slrm"))) { if(!requireNamespace("spatstat.model")) stop("The package spatstat.model is required when 'lambdaJ' is a fitted model", call.=FALSE) ## point process model provides intensity model <- lambdaJ if(update) { force(X) env.here <- sys.frame(sys.nframe()) model <- update(model, X, envir=env.here) dangerJ <- FALSE dangerous <- setdiff(dangerous, "lambdaJ") } if(inherits(model, "slrm")) { #' predict.slrm has different syntax, #' and does not support leave-one-out prediction lambdaJ <- predict(model)[XJ] } else { lambdaX <- fitted(model, dataonly=TRUE, leaveoneout=leaveoneout) lambdaJ <- lambdaX[J] } } else stop(paste(sQuote("lambdaJ"), "should be a vector or an image")) } ## Weight for each pair if(!is.null(lambdaIJ)) { dangerIJ <- TRUE dangerous <- union(dangerous, "lambdaIJ") if(!is.matrix(lambdaIJ)) stop("lambdaIJ should be a matrix") if(nrow(lambdaIJ) != nI) stop(paste("nrow(lambdaIJ) should equal the number of", Iexplain)) if(ncol(lambdaIJ) != nJ) stop(paste("ncol(lambdaIJ) should equal the number of", Jexplain)) } else { dangerIJ <- FALSE } danger <- dangerI || dangerJ || dangerIJ return(list(lambdaI = lambdaI, lambdaJ = lambdaJ, lambdaIJ=lambdaIJ, danger = danger, dangerous = dangerous)) } validate.weights <- function(x, recip=FALSE, how = NULL, allowzero = recip, allowinf = !recip) { ra <- range(x) offence <- if(!allowinf && !all(is.finite(ra))) "infinite" else if(ra[1] < 0) "negative" else if(!allowzero && ra[1] == 0) "zero" else NULL if(!is.null(offence)) { xname <- deparse(substitute(x)) offenders <- 
paste(offence, "values of", sQuote(xname)) if(is.null(how)) stop(paste(offenders, "are not allowed"), call.=FALSE) stop(paste(how, "yielded", offenders), call.=FALSE) } return(TRUE) } ## The following internal functions will soon be removed. ## They will be replaced by the idiom ## model <- update(model, X, ...) updateData <- function(model, X, ...) { UseMethod("updateData") } updateData.default <- function(model, X, ..., warn=TRUE) { ## for some bizarre reason, method dispatch often fails for this function ## so we do it by hand as a backup if(inherits(model, c("ppm", "kppm", "dppm", "slrm"))) { if (requireNamespace("spatstat.model")) { model <- update(model, X) } else if (warn) { warning("Model was not updated; this requires package spatstat.model", call. = FALSE) } } else if(inherits(model, "lppm")) { if (requireNamespace("spatstat.linnet")) { model <- update(model, X) } else if (warn) { warning("Model was not updated; this requires package spatstat.linnet", call. = FALSE) } } else if (warn) { warning("Unrecognised kind of 'model'; no update performed", call. = FALSE) } return(model) } spatstat.explore/R/bw.CvL.adaptive.R0000644000176200001440000000353514611073307017015 0ustar liggesusers#' #' bw.CvL.adaptive.R #' #' $Revision: 1.8 $ $Date: 2022/06/25 04:31:57 $ #' #' Original code by Marie-Colette van Lieshout #' Modified by Adrian Baddeley #' #' Copyright (c) Marie-Colette van Lieshout and Adrian Baddeley 2022 #' GNU Public Licence >= 2.0 bw.CvL.adaptive <- function(X, ..., hrange=NULL, nh=16, h=NULL, bwPilot=bw.scott.iso(X), edge=FALSE, diggle=TRUE) { verifyclass(X, "ppp") W <- Window(X) lW <- area.owin(W) if(!is.null(h)) { stopifnot(is.numeric(h)) stopifnot(all(h > 0)) } else { ## determine range of h if(!is.null(hrange)) { check.range(hrange) if(any(hrange <= 0)) stop("All h values must be positive") } else { nnd <- nndist(X) hrange <- c(min(nnd[nnd > 0]), diameter(as.owin(X))/2) } check.1.integer(nh) stopifnot(nh > 1) h <- geomseq(from=hrange[1L], to=hrange[2L], length.out=nh) } if(!is.null(bwPilot)) { check.1.real(bwPilot) stopifnot(bwPilot > 0) } pdens <- density(X, sigma=bwPilot, edge=TRUE, diggle=TRUE, at="pixels", leaveoneout=FALSE) lp2 <- cv <- numeric(nh) for (i in 1:nh) { lamxi <- adaptive.density(X, h0=h[i], pilot=pdens, method="kernel", edge=edge, diggle=diggle, at = "points", leaveoneout = FALSE, ...) 
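    ## Cronie-Van Lieshout criterion: for a well-calibrated intensity
    ## estimate, the sum of reciprocal intensities at the data points
    ## should equal the window area lW; record the squared discrepancy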
cv[i] <- sum(1/lamxi) lp2[i] <- (cv[i] - lW)^2 } result <- bw.optim(lp2, h, optimum="min", cvname="lp2", hname="h", criterion="Cronie-Van Lieshout", unitname=unitname(X), CvL=cv) return(result) } spatstat.explore/R/envelope3.R0000644000176200001440000000560514611073310016020 0ustar liggesusers# # envelope3.R # # simulation envelopes for pp3 # # $Revision: 1.14 $ $Date: 2022/01/04 05:30:06 $ # envelope.pp3 <- function(Y, fun=K3est, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- K3est if("clipdata" %in% names(list(...))) stop(paste("The argument", sQuote("clipdata"), "is not available for envelope.pp3")) envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) if(is.null(simulate)) { # ................................................... # Realisations of complete spatial randomness # will be generated by rpoispp # Data pattern X is argument Y # Data pattern determines intensity of Poisson process X <- Y sY <- summary(Y) Yintens <- sY$intensity Ydomain <- Y$domain # expression that will be evaluated simexpr <- if(!is.marked(Y)) { # unmarked point pattern expression(rpoispp3(Yintens, domain=Ydomain)) } else { stop("Sorry, simulation of marked 3D point patterns is not yet implemented") } # suppress warnings from code checkers dont.complain.about(Yintens, Ydomain) # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE) } else { # ................................................... 
# Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate # Data pattern is argument Y X <- Y } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=FALSE, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA, silent=silent, cl=cl, envir.user=envir.user, expected.arg=c("rmax", "nrval"), do.pwrong=do.pwrong) } spatstat.explore/R/hopskel.R0000644000176200001440000000510414611073310015557 0ustar liggesusers## ## hopskel.R ## Hopkins-Skellam test ## ## $Revision: 1.3 $ $Date: 2022/05/23 02:33:06 $ hopskel <- function(X) { stopifnot(is.ppp(X)) n <- npoints(X) if(n < 2) return(NA) dX <- nndist(X) U <- runifpoint(n, Window(X)) dU <- nncross(U, X, what="dist") A <- mean(dX^2)/mean(dU^2) return(A) } hopskel.test <- function(X, ..., alternative=c("two.sided", "less", "greater", "clustered", "regular"), method=c("asymptotic", "MonteCarlo"), nsim=999 ) { Xname <- short.deparse(substitute(X)) verifyclass(X, "ppp") W <- Window(X) n <- npoints(X) method <- match.arg(method) # alternative hypothesis alternative <- match.arg(alternative) if(alternative == "clustered") alternative <- "less" if(alternative == "regular") alternative <- "greater" altblurb <- switch(alternative, two.sided="two-sided", less="clustered (A < 1)", greater="regular (A > 1)") ## compute observed value statistic <- hopskel(X) ## p-value switch(method, asymptotic = { ## F-distribution nn <- 2 * n p.value <- switch(alternative, less = pf(statistic, nn, nn, lower.tail=TRUE), greater = pf(statistic, nn, nn, lower.tail=FALSE), two.sided = 2 * pf(statistic, nn, nn, lower.tail=(statistic < 1))) pvblurb <- "using F distribution" }, MonteCarlo = { ## Monte Carlo p-value check.1.integer(nsim) stopifnot(nsim > 1) sims <- numeric(nsim) for(i in 1:nsim) { Xsim <- runifpoint(n, win=W) sims[i] <- hopskel(Xsim) p.upper <- (1 + sum(sims >= statistic))/(1 + nsim) p.lower <- (1 + sum(sims <= statistic))/(1 + nsim) p.value <- switch(alternative, less=p.lower, greater=p.upper, two.sided=2*min(p.lower, p.upper)) } pvblurb <- paste("Monte Carlo test based on", nsim, "simulations of CSR with fixed n") }) statistic <- as.numeric(statistic) names(statistic) <- "A" out <- list(statistic=statistic, p.value=p.value, alternative=altblurb, method=c("Hopkins-Skellam test of CSR", pvblurb), data.name=Xname) class(out) <- "htest" return(out) } spatstat.explore/R/idw.R0000644000176200001440000001234314611073310014700 0ustar liggesusers# # idw.R # # Inverse-distance weighted smoothing # # $Revision: 1.14 $ $Date: 2022/05/21 08:53:38 $ idw <- function(X, power=2, at=c("pixels", "points"), ..., se=FALSE) { stopifnot(is.ppp(X) && is.marked(X)) at <- match.arg(at) marx <- marks(X) if(is.data.frame(marx)) { if((nc <- ncol(marx)) > 1) { ## multiple columns of marks - process one-by-one each <- vector(mode="list", length=nc) for(j in 1:nc) each[[j]] <- idw(X %mark% marx[,j], power=power, at=at, ..., se=se) names(each) <- colnames(marx) ## if(!se) { ## estimates only switch(at, pixels = { out <- as.solist(each) }, points = { out <- as.data.frame(each) } ) } else { ## estimates and standard errors est <- lapply(each, getElement, name="estimate") SE <- lapply(each, getElement, name="SE") switch(at, pixels = { out <- list(estimate 
= as.solist(est), SE = as.solist(SE)) }, points = { out <- list(estimate = as.data.frame(est), SE = as.data.frame(SE)) }) } return(out) } else marx <- marx[,1L] } if(!is.numeric(marx)) stop("Marks must be numeric") check.1.real(power) switch(at, pixels = { ## create grid W <- as.mask(as.owin(X), ...) dim <- W$dim npixels <- prod(dim) ## call C if(!se) { z <- .C(SE_Cidw, x = as.double(X$x), y = as.double(X$y), v = as.double(marx), n = as.integer(npoints(X)), xstart = as.double(W$xcol[1L]), xstep = as.double(W$xstep), nx = as.integer(dim[2L]), ystart = as.double(W$yrow[1L]), ystep = as.double(W$ystep), ny = as.integer(dim[1L]), power = as.double(power), num = as.double(numeric(npixels)), den = as.double(numeric(npixels)), rat = as.double(numeric(npixels)), PACKAGE="spatstat.explore") out <- as.im(matrix(z$rat, dim[1L], dim[2L]), W=W) out <- out[W, drop=FALSE] } else { z <- .C(SE_Cidw2, x = as.double(X$x), y = as.double(X$y), v = as.double(marx), n = as.integer(npoints(X)), xstart = as.double(W$xcol[1L]), xstep = as.double(W$xstep), nx = as.integer(dim[2L]), ystart = as.double(W$yrow[1L]), ystep = as.double(W$ystep), ny = as.integer(dim[1L]), power = as.double(power), num = as.double(numeric(npixels)), den = as.double(numeric(npixels)), rat = as.double(numeric(npixels)), mtwo = as.double(numeric(npixels)), wtwo = as.double(numeric(npixels)), PACKAGE="spatstat.explore") est <- as.im(matrix(z$rat, dim[1L], dim[2L]), W=W) est <- est[W, drop=FALSE] sumw <- z$den sumw2 <- z$wtwo m2 <- z$mtwo varden <- sumw - sumw2/sumw varden[varden <= 0] <- NA SE <- sqrt(m2/varden) SE <- as.im(matrix(SE, dim[1L], dim[2L]), W=W) SE <- SE[W, drop=FALSE] out <- solist(estimate=est, SE=SE) } }, points={ npts <- npoints(X) if(!se) { z <- .C(SE_idwloo, x = as.double(X$x), y = as.double(X$y), v = as.double(marx), n = as.integer(npts), power = as.double(power), num = as.double(numeric(npts)), den = as.double(numeric(npts)), rat = as.double(numeric(npts)), PACKAGE="spatstat.explore") out <- z$rat } else { z <- .C(SE_idwloo2, x = as.double(X$x), y = as.double(X$y), v = as.double(marx), n = as.integer(npts), power = as.double(power), num = as.double(numeric(npts)), den = as.double(numeric(npts)), rat = as.double(numeric(npts)), mtwo = as.double(numeric(npts)), wtwo = as.double(numeric(npts)), PACKAGE="spatstat.explore") est <- z$rat sumw <- z$den sumw2 <- z$wtwo m2 <- z$mtwo varden <- sumw - sumw2/sumw varden[varden <= 0] <- NA SE <- sqrt(m2/varden) out <- list(estimate=est, SE=SE) } }) return(out) } spatstat.explore/R/morisita.R0000644000176200001440000000240714611073310015744 0ustar liggesusers# # morisita.R # # $Revision: 1.3 $ $Date: 2020/11/17 01:30:18 $ # miplot <- function(X, ...) 
{ Xname <- short.deparse(substitute(X)) X <- as.ppp(X) W <- X$window N <- X$n if(W$type != "rectangle") stop("Window of X is not a rectangle - Morisita index undefined") a <- min(diff(W$xrange), diff(W$yrange)) maxnquad <- floor(a/mean(nndist(X))) if(maxnquad <= 1) stop("Not enough points for a Morisita plot") mindex <- numeric(maxnquad) for(nquad in 1:maxnquad) { qq <- quadratcount(X, nquad, nquad) tt <- as.vector(as.table(qq)) mindex[nquad] <- length(tt) * sum(tt * (tt-1))/(N*(N-1)) } quadsize <- diameter(W)/(1:maxnquad) ok <- (quadsize <= a) quadsize <- quadsize[ok] mindex <- mindex[ok] unitinfo <- summary(unitname(W))$axis do.call(plot.default, resolve.defaults(list(quote(quadsize), quote(mindex)), list(...), list(xlim=c(0,max(quadsize)), ylim=c(0,max(1, mindex)), xlab=paste("Diameter of quadrat", unitinfo), ylab="Morisita index", main=paste("Morisita plot for", Xname)))) abline(h=1, lty=2) return(invisible(NULL)) } spatstat.explore/R/nncorr.R0000644000176200001440000001154414611073310015420 0ustar liggesusers# # nncorr.R # # $Revision: 1.12 $ $Date: 2019/01/22 03:08:57 $ # nnmean <- function(X, k=1, na.action="warn") { stopifnot(is.ppp(X)) if(!is.marked(X, na.action=na.action)) stop("X must be a marked point pattern", call.=FALSE) if(k %% 1 != 0 || length(k) != 1 || k <= 0) stop("k should be a single integer greater than 0", call.=FALSE) m <- numeric.columns(marks(X), logical=TRUE, others="na") ## default result nana <- rep(NA_real_, ncol(m)) ans <- rbind(unnormalised=nana, normalised=nana) ## if(all(is.na(m))) { warning("non-numeric marks; results are NA", call.=FALSE) } else if(k >= npoints(X)) { warning(paste("Not enough points to compute k-th nearest neighbours", paste0(paren(paste0("n = ", npoints(X), ", k = ", k)), ";"), "results are NA"), call.=FALSE) } else { nnid <- nnwhich(X, k=k) ok <- (nndist(X, k=k) <= bdist.points(X)) if(!any(ok, na.rm=TRUE)) { warning("insufficient data remaining after border correction; results are NA") } else { numer <- sapply(as.data.frame(m[nnid[ok], ]), mean, na.rm=TRUE) denom <- sapply(as.data.frame(m), mean, na.rm=TRUE) ans <- rbind(unnormalised=numer, normalised =numer/denom) } } if(ncol(ans) == 1) ans <- ans[,1,drop=TRUE] return(ans) } nnvario <- local({ nnvario <- function(X, k=1, na.action="warn") { stopifnot(is.ppp(X)) if(!is.marked(X, na.action=na.action)) stop("X must be a marked point pattern", call.=FALSE) m <- numeric.columns(marks(X), logical=TRUE, others="na") if(all(is.na(m))) warning("non-numeric marks; results are NA", call.=FALSE) ans <- nncorr(X %mark% m, sqdif, k=k, denominator=diag(var(m)), na.action="ignore") return(ans) } sqdif <- function(m1,m2) { ((m1-m2)^2)/2 } nnvario }) nncorr <- function(X, f = function(m1,m2) { m1 * m2}, k=1, ..., use = "all.obs", method = c("pearson", "kendall", "spearman"), denominator=NULL, na.action="warn") { stopifnot(is.ppp(X)) if(!is.marked(X, na.action=na.action)) stop("X must be a marked point pattern", call.=FALSE) if(k %% 1 != 0 || length(k) != 1 || k <= 0) stop("k should be a single integer greater than 0", call.=FALSE) if(k >= npoints(X)) stop("Not enough points to compute k-th nearest neighbours") m <- as.data.frame(marks(X)) nv <- ncol(m) if(nv == 1) colnames(m) <- "" # if(missing(method) || is.null(method)) method <- "pearson" # if(missing(f)) f <- NULL if(!is.null(f) && !is.function(f)) { if(nv == 1) stop("f should be a function") # could be a list of functions if(!(is.list(f) && all(unlist(lapply(f, is.function))))) stop("f should be a function or a list of functions") if(length(f) != 
nv) stop("Length of list f does not match number of mark variables") } # optional denominator(s) if(!is.null(denominator) && !(length(denominator) %in% c(1, nv))) stop("Denominator has incorrect length") # multi-dimensional case if(nv > 1) { # replicate things if(is.function(f)) f <- rep.int(list(f), nv) if(length(denominator) <= 1) denominator <- rep.int(list(denominator), nv) # result <- matrix(NA, nrow=3, ncol=nv) outnames <- c("unnormalised", "normalised", "correlation") dimnames(result) <- list(outnames, colnames(m)) for(j in 1:nv) { mj <- m[,j, drop=FALSE] denj <- denominator[[j]] nncj <- nncorr(X %mark% mj, f=f[[j]], k=k, use=use, method=method, denominator=denj) kj <- length(nncj) result[1:kj,j] <- nncj } if(all(is.na(result[3, ]))) result <- result[1:2, ] return(result) } # one-dimensional m <- m[,1,drop=TRUE] # select 'f' appropriately for X chk <- check.testfun(f, X=X) f <- chk$f ftype <- chk$ftype # denominator Efmm <- if(!is.null(denominator)) denominator else switch(ftype, mul={ mean(m)^2 }, equ={ sum(table(m)^2)/length(m)^2 }, general={ mean(outer(m, m, f, ...)) }) # border method nn <- nnwhich(X, k=k) ok <- (nndist(X, k=k) <= bdist.points(X)) if(!any(ok)) stop("Insufficient data") mY <- m[nn[ok]] mX <- m[ok] Efmk <- switch(ftype, mul = { mean(mX * mY, ...) }, equ = { mean(mX == mY, ...) }, general = { mean(f(mX, mY, ...)) }) # answer <- c(unnormalised=Efmk, normalised=Efmk/Efmm) if(ftype == "mul") { classic <- cor(mX, mY, use=use, method=method) answer <- c(answer, correlation=classic) } return(answer) } spatstat.explore/R/edgeRipley.R0000644000176200001440000002006214611073310016203 0ustar liggesusers# # edgeRipley.R # # $Revision: 1.21 $ $Date: 2022/05/21 08:53:38 $ # # Ripley isotropic edge correction weights # # edge.Ripley(X, r, W) compute isotropic correction weights # for centres X[i], radii r[i,j], window W # # To estimate the K-function see the idiom in "Kest.S" # ####################################################################### edge.Ripley <- local({ small <- function(x) { abs(x) < .Machine$double.eps } hang <- function(d, r) { nr <- nrow(r) nc <- ncol(r) answer <- matrix(0, nrow=nr, ncol=nc) # replicate d[i] over j index d <- matrix(d, nrow=nr, ncol=nc) hit <- (d < r) answer[hit] <- acos(d[hit]/r[hit]) answer } edge.Ripley <- function(X, r, W=Window(X), method=c("C", "interpreted"), maxweight=100, internal=list()) { # X is a point pattern, or equivalent X <- as.ppp(X, W) W <- X$window method <- match.arg(method) debug <- resolve.1.default(list(debug=FALSE), internal) repair <- resolve.1.default(list(repair=TRUE), internal) switch(W$type, rectangle={}, polygonal={ if(method != "C") stop(paste("Ripley isotropic correction for polygonal windows", "requires method = ", dQuote("C"))) }, mask={ stop(paste("sorry, Ripley isotropic correction", "is not implemented for binary masks")) } ) n <- npoints(X) if(is.matrix(r) && nrow(r) != n) stop("the number of rows of r should match the number of points in X") if(!is.matrix(r)) { if(length(r) != n) stop("length of r is incompatible with the number of points in X") r <- matrix(r, nrow=n) } # Nr <- nrow(r) Nc <- ncol(r) if(Nr * Nc == 0) return(r) ########## x <- X$x y <- X$y switch(method, interpreted = { ######## interpreted R code for rectangular case ######### # perpendicular distance from point to each edge of rectangle # L = left, R = right, D = down, U = up dL <- x - W$xrange[1L] dR <- W$xrange[2L] - x dD <- y - W$yrange[1L] dU <- W$yrange[2L] - y # detect whether any points are corners of the rectangle corner <- 
(small(dL) + small(dR) + small(dD) + small(dU) >= 2) # angle between (a) perpendicular to edge of rectangle # and (b) line from point to corner of rectangle bLU <- atan2(dU, dL) bLD <- atan2(dD, dL) bRU <- atan2(dU, dR) bRD <- atan2(dD, dR) bUL <- atan2(dL, dU) bUR <- atan2(dR, dU) bDL <- atan2(dL, dD) bDR <- atan2(dR, dD) # The above are all vectors [i] # Now we compute matrices [i,j] # half the angle subtended by the intersection between # the circle of radius r[i,j] centred on point i # and each edge of the rectangle (prolonged to an infinite line) aL <- hang(dL, r) aR <- hang(dR, r) aD <- hang(dD, r) aU <- hang(dU, r) # apply maxima # note: a* are matrices; b** are vectors; # b** are implicitly replicated over j index cL <- pmin.int(aL, bLU) + pmin.int(aL, bLD) cR <- pmin.int(aR, bRU) + pmin.int(aR, bRD) cU <- pmin.int(aU, bUL) + pmin.int(aU, bUR) cD <- pmin.int(aD, bDL) + pmin.int(aD, bDR) # total exterior angle ext <- cL + cR + cU + cD ext <- matrix(ext, Nr, Nc) # add pi/2 for corners if(any(corner)) ext[corner,] <- ext[corner,] + pi/2 # OK, now compute weight weight <- 1 / (1 - ext/(2 * pi)) }, C = { ############ C code ############################# switch(W$type, rectangle={ if(!debug) { z <- .C(SE_ripleybox, nx=as.integer(n), x=as.double(x), y=as.double(y), rmat=as.double(r), nr=as.integer(Nc), #sic xmin=as.double(W$xrange[1L]), ymin=as.double(W$yrange[1L]), xmax=as.double(W$xrange[2L]), ymax=as.double(W$yrange[2L]), epsilon=as.double(.Machine$double.eps), out=as.double(numeric(Nr * Nc)), PACKAGE="spatstat.explore") } else { z <- .C(SE_ripboxDebug, nx=as.integer(n), x=as.double(x), y=as.double(y), rmat=as.double(r), nr=as.integer(Nc), #sic xmin=as.double(W$xrange[1L]), ymin=as.double(W$yrange[1L]), xmax=as.double(W$xrange[2L]), ymax=as.double(W$yrange[2L]), epsilon=as.double(.Machine$double.eps), out=as.double(numeric(Nr * Nc)), PACKAGE="spatstat.explore") } weight <- matrix(z$out, nrow=Nr, ncol=Nc) }, polygonal={ Y <- edges(W) bd <- bdist.points(X) if(!debug) { z <- .C(SE_ripleypoly, nc=as.integer(n), xc=as.double(x), yc=as.double(y), bd=as.double(bd), nr=as.integer(Nc), rmat=as.double(r), nseg=as.integer(Y$n), x0=as.double(Y$ends$x0), y0=as.double(Y$ends$y0), x1=as.double(Y$ends$x1), y1=as.double(Y$ends$y1), out=as.double(numeric(Nr * Nc)), PACKAGE="spatstat.explore") } else { z <- .C(SE_rippolDebug, nc=as.integer(n), xc=as.double(x), yc=as.double(y), bd=as.double(bd), nr=as.integer(Nc), rmat=as.double(r), nseg=as.integer(Y$n), x0=as.double(Y$ends$x0), y0=as.double(Y$ends$y0), x1=as.double(Y$ends$x1), y1=as.double(Y$ends$y1), out=as.double(numeric(Nr * Nc)), PACKAGE="spatstat.explore") } angles <- matrix(z$out, nrow = Nr, ncol = Nc) weight <- 2 * pi/angles } ) } ) ## eliminate wild values if(repair) weight <- matrix(pmax.int(1, pmin.int(maxweight, weight)), nrow=Nr, ncol=Nc) return(weight) } edge.Ripley }) rmax.Ripley <- function(W) { W <- as.owin(W) if(is.rectangle(W)) return(boundingradius(W)) if(is.polygonal(W) && length(W$bdry) == 1L) return(boundingradius(W)) ## could have multiple connected components pieces <- tiles(tess(image=connected(W))) answer <- sapply(pieces, boundingradius) return(as.numeric(answer)) } spatstat.explore/R/pairorient.R0000644000176200001440000001624014611073310016271 0ustar liggesusers## ## pairorient.R ## ## point pair orientation distribution ## ## Function O_{r1,r2}(phi) defined in ## Stoyan & Stoyan (1994) equ (14.53) page 271 ## ## and its derivative estimated by kernel smoothing ## ## $Revision: 1.12 $ $Date: 2022/06/27 07:45:30 $ pairorient 
<- function(X, r1, r2, ..., cumulative=FALSE, correction, ratio=FALSE, unit=c("degree", "radian"), domain=NULL) { stopifnot(is.ppp(X)) check.1.real(r1) check.1.real(r2) stopifnot(r1 < r2) W <- Window(X) if(!is.null(domain)) stopifnot(is.subset.owin(domain, W)) unit <- match.arg(unit) switch(unit, degree = { FullCircle <- 360 Convert <- 180/pi }, radian = { FullCircle <- 2 * pi Convert <- 1 }) ## choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(!correction.given) correction <- c("border", "bord.modif", "translate", "isotropic") correction <- pickoption("correction", correction, c(none="none", border="border", bord.modif="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", good="good", best="best"), multi=TRUE) # best.wanted <- ("best" %in% correction) ## replace 'good' by the optimal choice for this size of dataset if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) ## retain only corrections that are implemented for the window correction <- implemented.for.K(correction, W$type, correction.given) ## Find close pairs in range [r1, r2] close <- as.data.frame(closepairs(X, r2)) ok <- with(close, r1 <= d & d <= r2) if(!is.null(domain)) ok <- ok & with(close, inside.owin(xi, yi, domain)) if(!any(ok)) { warning(paste("There are no pairs of points in the distance range", prange(c(r1,r2)))) return(NULL) } close <- close[ok, , drop=FALSE] ANGLE <- with(close, atan2(dy, dx) * Convert) %% FullCircle nangles <- length(ANGLE) ## initialise output object Nphi <- 512 breaks <- make.even.breaks(bmax=FullCircle, npos=Nphi-1) phi <- breaks$r Odf <- data.frame(phi = phi, theo = (if(cumulative) phi else 1)/FullCircle) desc <- c("angle argument phi", "theoretical isotropic %s") Oletter <- if(cumulative) "O" else "o" Osymbol <- as.name(Oletter) OO <- ratfv(Odf, NULL, denom=nangles, argu="phi", ylab=substitute(fn[R1,R2](phi), list(R1=r1, R2=r2, fn=Osymbol)), valu="theo", fmla = . ~ phi, alim = c(0, FullCircle), c("phi", "{%s[%s]^{pois}}(phi)"), desc, fname=c(Oletter, paste0("list(", r1, ",", r2, ")")), yexp=substitute(fn[list(R1,R2)](phi), list(R1=r1,R2=r2,fn=Osymbol))) ## ^^^^^^^^^^^^^^^ Compute edge corrected estimates ^^^^^^^^^^^^^^^^ if(any(correction == "none")) { ## uncorrected! For demonstration purposes only! 
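    ## (each pair of points is given equal weight and no edge correction is applied)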
if(cumulative) { wh <- whist(ANGLE, breaks$val) # no weights num.un <- cumsum(wh) } else { kd <- circdensity(ANGLE, ..., n=Nphi, unit=unit) num.un <- kd$y * nangles } den.un <- nangles ## uncorrected estimate OO <- bind.ratfv(OO, data.frame(un=num.un), den.un, "{hat(%s)[%s]^{un}}(phi)", "uncorrected estimate of %s", "un", ratio=ratio) } if(any(c("border", "bord.modif") %in% correction)) { ## border type corrections bX <- bdist.points(X) bI <- bX[close$i] if("border" %in% correction) { bok <- (bI > r2) ANGLEok <- ANGLE[bok] nok <- length(ANGLEok) if(cumulative) { wh <- whist(ANGLEok, breaks$val) num.bord <- cumsum(wh) } else { kd <- circdensity(ANGLEok, ..., n=Nphi, unit=unit) num.bord <- kd$y * nok } den.bord <- nok OO <- bind.ratfv(OO, data.frame(border=num.bord), den.bord, "{hat(%s)[%s]^{bord}}(phi)", "border-corrected estimate of %s", "border", ratio=ratio) } if("bord.modif" %in% correction) { ok <- (close$d < bI) nok <- sum(ok) inradius <- max(distmap(W, invert=TRUE)) rrr <- range(r2, inradius) rr <- seq(rrr[1], rrr[2], length=256) Ar <- eroded.areas(W, rr) Arf <- approxfun(rr, Ar, rule=2) AI <- (Arf(bX))[close$i] edgewt <- ifelse(ok, pmin(area(W)/AI, 100), 0) if(cumulative) { wh <- whist(ANGLE, breaks$val, edgewt) num.bm <- cumsum(wh)/mean(edgewt) } else { w <- edgewt/sum(edgewt) kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit) num.bm <- kd$y * nok } den.bm <- nok OO <- bind.ratfv(OO, data.frame(bordm=num.bm), den.bm, "{hat(%s)[%s]^{bordm}}(phi)", "modified border-corrected estimate of %s", "bordm", ratio=ratio) } } if(any(correction == "translate")) { ## Ohser-Stoyan translation correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE) if(cumulative) { wh <- whist(ANGLE, breaks$val, edgewt) num.trans <- cumsum(wh)/mean(edgewt) } else { w <- edgewt/sum(edgewt) kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit) num.trans <- kd$y * nangles } den.trans <- nangles OO <- bind.ratfv(OO, data.frame(trans=num.trans), den.trans, "{hat(%s)[%s]^{trans}}(phi)", "translation-corrected estimate of %s", "trans", ratio=ratio) } if(any(correction == "isotropic")) { ## Ripley isotropic correction XI <- ppp(close$xi, close$yi, window=W, check=FALSE) DIJ <- close$d edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) if(cumulative) { wh <- whist(ANGLE, breaks$val, edgewt) num.iso <- cumsum(wh)/mean(edgewt) } else { w <- edgewt/sum(edgewt) kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit) num.iso <- kd$y * nangles } den.iso <- nangles OO <- bind.ratfv(OO, data.frame(iso=num.iso), den.iso, "{hat(%s)[%s]^{iso}}(phi)", "Ripley isotropic-corrected estimate of %s", "iso", ratio=ratio) } unitname(OO) <- switch(unit, degree = c("degree", "degrees"), radian = c("radian", "radians")) return(OO) } spatstat.explore/R/Kmeasure.R0000644000176200001440000004406514611073307015705 0ustar liggesusers# # Kmeasure.R # # $Revision: 1.75 $ $Date: 2023/03/15 13:41:48 $ # # Kmeasure() compute an estimate of the second order moment measure # # Kest.fft() use Kmeasure() to form an estimate of the K-function # # second.moment.calc() underlying algorithm # # second.moment.engine() underlying underlying algorithm! 
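#
# Illustrative usage sketch (not run; assumes the spatstat family of packages
# is loaded and X is a point pattern, e.g. X <- rpoispp(100)):
#
#    M <- Kmeasure(X, sigma=0.05)   # second moment measure, as a pixel image
#    plot(M)
#    K <- Kest.fft(X, sigma=0.01)   # FFT-based estimate of the K-function
#    plot(K)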
# Kmeasure <- function(X, sigma, edge=TRUE, ..., varcov=NULL) { stopifnot(is.ppp(X)) sigma.given <- !missing(sigma) && !is.null(sigma) varcov.given <- !is.null(varcov) ngiven <- sigma.given + varcov.given if(ngiven == 2) stop(paste("Give only one of the arguments", sQuote("sigma"), "and", sQuote("varcov"))) if(ngiven == 0) stop(paste("Please specify smoothing bandwidth", sQuote("sigma"), "or", sQuote("varcov"))) if(varcov.given) { stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov)==2 ) sigma <- NULL } else { stopifnot(is.numeric(sigma)) stopifnot(length(sigma) %in% c(1,2)) stopifnot(all(sigma > 0)) if(length(sigma) == 2) { varcov <- diag(sigma^2) sigma <- NULL } } second.moment.calc(x=X, sigma=sigma, edge=edge, what="Kmeasure", varcov=varcov, ...) } second.moment.calc <- function(x, sigma=NULL, edge=TRUE, what=c("Kmeasure", "kernel", "smooth", "Bartlett", "edge", "smoothedge", "all"), ..., varcov=NULL, expand=FALSE, obswin, npts=NULL, debug=FALSE) { if(is.null(sigma) && is.null(varcov)) stop("must specify sigma or varcov") obswin.given <- !missing(obswin) what <- match.arg(what) sig <- if(!is.null(sigma)) sigma else max(c(diag(varcov), sqrt(det(varcov)))) xtype <- if(is.ppp(x)) "ppp" else if(is.im(x)) "im" else if(inherits(x, "imlist")) "imlist" else if(all(sapply(x, is.im))) "imlist" else stop("x should be a point pattern or a pixel image") nimages <- switch(xtype, ppp = 1, im = 1, imlist = length(x)) win <- if(nimages == 1) as.owin(x) else as.owin(x[[1]]) win <- rescue.rectangle(win) rec <- as.rectangle(win) across <- min(diff(rec$xrange), diff(rec$yrange)) # determine whether to expand window if(!expand || (6 * sig < across)) { if(!obswin.given) obswin <- NULL result <- second.moment.engine(x, sigma=sigma, edge=edge, what=what, debug=debug, ..., obswin=obswin, npts=npts, varcov=varcov) return(result) } #' need to expand window wid <- (7 * sig - across)/2 bigger <- grow.rectangle(rec, wid) switch(xtype, ppp = { # pixellate first (to preserve pixel resolution) X <- pixellate(x, ..., padzero=TRUE) np <- npoints(x) }, im = { X <- x np <- NULL }, imlist = { X <- x np <- NULL }) # Now expand if(nimages == 1) { X <- rebound.im(X, bigger) X <- na.handle.im(X, 0) } else { X <- lapply(X, rebound.im, rect=bigger) X <- lapply(X, na.handle.im, na.replace=0) } ## handle override arguments ow <- if(obswin.given) obswin else win # may be NULL if given if(!is.null(npts)) np <- npts ## Compute! 
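  ## (the engine is applied to the expanded data; the results are clipped
  ## back to the original frame and window immediately below)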
out <- second.moment.engine(X, sigma=sigma, edge=edge, what=what, debug=debug, ..., obswin=ow, varcov=varcov, npts=np) # Now clip it fbox <- shift(rec, origin="midpoint") if(nimages == 1) { result <- switch(what, kernel = out[fbox], smooth = out[win], Kmeasure = out[fbox], Bartlett = out[fbox], edge = out[win], smoothedge = list(smooth=out$smooth[win], edge =out$edge[win]), all = list(kernel=out$kernel[fbox], smooth=out$smooth[win], Kmeasure=out$Kmeasure[fbox], Bartlett=out$Bartlett[fbox], edge=out$edge[win])) } else { result <- switch(what, kernel = out[fbox], smooth = lapply(out, "[", i=win), Kmeasure = lapply(out, "[", i=fbox), Bartlett = lapply(out, "[", i=fbox), edge = out[win], smoothedge = list( smooth = lapply(out$smooth, "[", i=win), edge = out$edge[win]), all = list( kernel=out$kernel[fbox], smooth=lapply(out$smooth, "[", i=win), Kmeasure=lapply(out$Kmeasure, "[", i=fbox), Bartlett=lapply(out$Bartlett, "[", i=fbox), edge=out$edge[win])) } return(result) } second.moment.engine <- function(x, sigma=NULL, edge=TRUE, what=c("Kmeasure", "kernel", "smooth", "Bartlett", "edge", "smoothedge", "all"), ..., kernel="gaussian", scalekernel=is.character(kernel), kerpow=1, obswin = as.owin(x), varcov=NULL, npts=NULL, debug=FALSE, fastgauss=FALSE) { what <- match.arg(what) validate2Dkernel(kernel) obswin.given <- !missing(obswin) && !is.null(obswin) is.second.order <- what %in% c("Kmeasure", "Bartlett", "all") needs.kernel <- what %in% c("kernel", "all", "Kmeasure") returns.several <- what %in% c("all", "smoothedge") # check whether Fastest Fourier Transform in the West is available west <- fftwAvailable() if(returns.several) result <- list() # several results will be returned in a list if(is.ppp(x)) { # convert list of points to mass distribution X <- pixellate(x, ..., padzero=TRUE) if(is.null(npts)) npts <- npoints(x) } else X <- x if(is.im(X)) { Xlist <- list(X) nimages <- 1 } else if(all(unlist(lapply(X, is.im)))) { Xlist <- X X <- Xlist[[1]] nimages <- length(Xlist) blanklist <- vector(mode="list", length=nimages) names(blanklist) <- names(Xlist) } else stop("internal error: unrecognised format for x") unitsX <- unitname(X) xstep <- X$xstep ystep <- X$ystep ## ensure obswin has same bounding frame as X if(!obswin.given) { obswin <- Window(x) } else if(!identical(Frame(obswin), Frame(X))) { obswin <- rebound.owin(obswin, as.rectangle(X)) } # go to work Y <- X$v Ylist <- lapply(Xlist, getElement, name="v") # pad with zeroes nr <- nrow(Y) nc <- ncol(Y) Ypad <- matrix(0, ncol=2*nc, nrow=2*nr) Ypadlist <- rep(list(Ypad), nimages) for(i in 1:nimages) Ypadlist[[i]][1:nr, 1:nc] <- Ylist[[i]] Ypad <- Ypadlist[[1]] lengthYpad <- 4 * nc * nr # corresponding coordinates xcol.pad <- X$xcol[1] + xstep * (0:(2*nc-1)) yrow.pad <- X$yrow[1] + ystep * (0:(2*nr-1)) # compute kernel and its Fourier transform if(fastgauss && !needs.kernel && identical(kernel, "gaussian") && (kerpow == 1) && is.numeric(sigma) && (length(sigma) == 1)) { #' compute Fourier transform of kernel directly (*experimental*) ii <- c(0:(nr-1), nr:1) jj <- c(0:(nc-1), nc:1) cc <- -(sigma^2 * pi^2)/2 ww <- sidelengths(Frame(X))^2 uu <- exp(ii^2 * cc/ww[2]) vv <- exp(jj^2 * cc/ww[1]) fK <- outer(uu, vv, "*") } else { # set up kernel xcol.ker <- xstep * c(0:(nc-1),-(nc:1)) yrow.ker <- ystep * c(0:(nr-1),-(nr:1)) #' kerpixarea <- xstep * ystep if(identical(kernel, "gaussian")) { if(!is.null(sigma)) { densX.ker <- dnorm(xcol.ker, sd=sigma) densY.ker <- dnorm(yrow.ker, sd=sigma) #' WAS: Kern <- outer(densY.ker, densX.ker, "*") * kerpixarea Kern 
<- outer(densY.ker, densX.ker, "*") Kern <- Kern/sum(Kern) } else if(!is.null(varcov)) { ## anisotropic kernel Sinv <- solve(varcov) halfSinv <- Sinv/2 #' WAS: #' detSigma <- det(varcov) #' constker <- kerpixarea/(2 * pi * sqrt(detSigma)) xsq <- matrix((xcol.ker^2)[col(Ypad)], ncol=2*nc, nrow=2*nr) ysq <- matrix((yrow.ker^2)[row(Ypad)], ncol=2*nc, nrow=2*nr) xy <- outer(yrow.ker, xcol.ker, "*") #' WAS: Kern <- constker * exp(.... Kern <- exp(-(xsq * halfSinv[1,1] + xy * (halfSinv[1,2]+halfSinv[2,1]) + ysq * halfSinv[2,2])) Kern <- Kern/sum(Kern) } else stop("Must specify either sigma or varcov") } else { ## non-Gaussian kernel ## evaluate kernel at array of points xker <- as.vector(xcol.ker[col(Ypad)]) yker <- as.vector(yrow.ker[row(Ypad)]) #' WAS: Kern <- kerpixarea * evaluate2Dkernel(... Kern <- evaluate2Dkernel(kernel, xker, yker, sigma=sigma, varcov=varcov, scalekernel=scalekernel, ...) if(!all(ok <- is.finite(Kern))) { if(anyNA(Kern)) stop("kernel function produces NA values") if(any(is.nan(Kern))) stop("kernel function produces NaN values") ra <- range(Kern[ok]) Kern[Kern == Inf] <- ra[2] Kern[Kern == -Inf] <- ra[1] } Kern <- matrix(Kern, ncol=2*nc, nrow=2*nr) Kern <- Kern/sum(Kern) } if(what %in% c("kernel", "all")) { ## kernel will be returned ## first rearrange it into spatially sensible order (monotone x and y) rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1 ctwist <- (-nc):(nc-1) %% (2*nc) + 1 if(debug) { if(any(fave.order(xcol.ker) != rtwist)) splat("something round the twist") } Kermit <- Kern[ rtwist, ctwist] ker <- im(Kermit, xcol.ker[ctwist], yrow.ker[ rtwist], unitname=unitsX) if(what == "kernel") return(ker) else result$kernel <- ker } ## optionally raise kernel to a power (e.g. for variance calculations) if(kerpow != 1) { ## convert probability mass to density pixarea <- xstep * ystep Kern <- Kern/pixarea ## raise to exponent (this is numerically more stable) Kern <- Kern^kerpow ## convert back to unnormalised masses Kern <- Kern * pixarea } ## convolve using fft fK <- fft2D(Kern, west=west) } if(what != "edge") { if(nimages == 1) { fY <- fft2D(Ypad, west=west) sm <- fft2D(fY * fK, inverse=TRUE, west=west)/lengthYpad if(debug) { splat("smooth: maximum imaginary part=", signif(max(Im(sm)),3)) if(!is.null(npts)) splat("smooth: mass error=", signif(sum(Mod(sm))-npts,3)) } } else { fYlist <- smlist <- blanklist for(i in 1:nimages) { fYlist[[i]] <- fY.i <- fft2D(Ypadlist[[i]], west=west) smlist[[i]] <- sm.i <- fft2D(fY.i * fK, inverse=TRUE, west=west)/lengthYpad if(debug) { splat("smooth component", i, ": maximum imaginary part=", signif(max(Im(sm.i)),3)) if(!is.null(npts)) splat("smooth component", i, ": mass error=", signif(sum(Mod(sm.i))-npts,3)) } } } } if(what %in% c("smooth", "all", "smoothedge")) { # compute smoothed point pattern without edge correction if(nimages == 1) { smo <- im(Re(sm)[1:nr, 1:nc], xcol.pad[1:nc], yrow.pad[1:nr], unitname=unitsX) if(what == "smooth") { return(smo) } else { result$smooth <- smo } } else { smolist <- blanklist for(i in 1:nimages) smolist[[i]] <- im(Re(smlist[[i]])[1:nr, 1:nc], xcol.pad[1:nc], yrow.pad[1:nr], unitname=unitsX) smolist <- as.solist(smolist) if(what == "smooth") { return(smolist) } else { result$smooth <- smolist } } } if(is.second.order) { # compute Bartlett spectrum if(nimages == 1) { bart <- BartCalc(fY, fK) ## bart <- Mod(fY)^2 * fK } else { bartlist <- lapply(fYlist, BartCalc, fK=fK) } } if(what %in% c("Bartlett", "all")) { # Bartlett spectrum will be returned # rearrange into spatially sensible order (monotone x and y) 
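    # (the FFT stores row/column frequencies in wrapped order;
    #  the index permutations below rearrange them into increasing order)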
rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1 ctwist <- (-nc):(nc-1) %% (2*nc) + 1 if(nimages == 1) { Bart <- bart[ rtwist, ctwist] Bartlett <- im(Mod(Bart),(-nc):(nc-1), (-nr):(nr-1)) if(what == "Bartlett") return(Bartlett) else result$Bartlett <- Bartlett } else { Bartlist <- blanklist for(i in 1:nimages) { Bart <- (bartlist[[i]])[ rtwist, ctwist] Bartlist[[i]] <- im(Mod(Bart),(-nc):(nc-1), (-nr):(nr-1)) } Bartlist <- as.solist(Bartlist) if(what == "Bartlett") return(Bartlist) else result$Bartlett <- Bartlist } } #### ------- Second moment measure -------------- # if(is.second.order) { if(nimages == 1) { mom <- fft2D(bart, inverse=TRUE, west=west)/lengthYpad if(debug) { splat("2nd moment measure: maximum imaginary part=", signif(max(Im(mom)),3)) if(!is.null(npts)) splat("2nd moment measure: mass error=", signif(sum(Mod(mom))-npts^2, 3)) } mom <- Mod(mom) # subtract (delta_0 * kernel) * npts if(is.null(npts)) stop("Internal error: second moment measure requires npts") mom <- mom - npts* Kern } else { momlist <- blanklist for(i in 1:nimages) { mom.i <- fft2D(bartlist[[i]], inverse=TRUE, west=west)/lengthYpad if(debug) { splat("2nd moment measure: maximum imaginary part=", signif(max(Im(mom.i)),3)) if(!is.null(npts)) splat("2nd moment measure: mass error=", signif(sum(Mod(mom.i))-npts^2, 3)) } mom.i <- Mod(mom.i) # subtract (delta_0 * kernel) * npts if(is.null(npts)) stop("Internal error: second moment measure requires npts") mom.i <- mom.i - npts* Kern momlist[[i]] <- mom.i } } } # edge correction if(edge || what %in% c("edge", "all", "smoothedge")) { M <- as.mask(obswin, xy=list(x=X$xcol, y=X$yrow))$m # previous line ensures M has same dimensions and scale as Y Mpad <- matrix(0, ncol=2*nc, nrow=2*nr) Mpad[1:nr, 1:nc] <- M lengthMpad <- 4 * nc * nr fM <- fft2D(Mpad, west=west) if(edge && is.second.order) { # compute kernel-smoothed set covariance # apply edge correction co <- fft2D(Mod(fM)^2 * fK, inverse=TRUE, west=west)/lengthMpad co <- Mod(co) a <- sum(M) wt <- a/co me <- spatstat.options("maxedgewt") weight <- matrix(pmin.int(me, wt), ncol=2*nc, nrow=2*nr) # apply edge correction to second moment measure if(nimages == 1) { mom <- mom * weight # set to NA outside 'reasonable' region mom[wt > 10] <- NA } else { wgt10 <- (wt > 10) for(i in 1:nimages) { mom.i <- momlist[[i]] mom.i <- mom.i * weight # set to NA outside 'reasonable' region mom.i[wgt10] <- NA momlist[[i]] <- mom.i } } } } if(is.second.order) { # rearrange second moment measure # into spatially sensible order (monotone x and y) rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1 ctwist <- (-nc):(nc-1) %% (2*nc) + 1 if(nimages == 1) { mom <- mom[ rtwist, ctwist] } else { momlist <- lapply(momlist, "[", i=rtwist, j=ctwist) } if(debug) { if(any(fave.order(xcol.ker) != rtwist)) splat("internal error: something round the twist") } } if(what %in% c("edge", "all", "smoothedge")) { # return convolution of window with kernel # (evaluated inside window only) con <- fft2D(fM * fK, inverse=TRUE, west=west)/lengthMpad edg <- Mod(con[1:nr, 1:nc]) edg <- im(edg, xcol.pad[1:nc], yrow.pad[1:nr], unitname=unitsX) if(what == "edge") return(edg) else result$edge <- edg } if(what == "smoothedge") return(result) # Second moment measure, density estimate # Divide by number of points * lambda and convert mass to density pixarea <- xstep * ystep if(nimages == 1) { mom <- mom * area(obswin) / (pixarea * npts^2) # this is the second moment measure mm <- im(mom, xcol.ker[ctwist], yrow.ker[rtwist], unitname=unitsX) if(what == "Kmeasure") return(mm) else result$Kmeasure 
<- mm } else { ccc <- area(obswin) / (pixarea * npts^2) mmlist <- blanklist for(i in 1:nimages) { mom.i <- momlist[[i]] mom.i <- mom.i * ccc # this is the second moment measure mmlist[[i]] <- im(mom.i, xcol.ker[ctwist], yrow.ker[rtwist], unitname=unitsX) } mmlist <- as.solist(mmlist) if(what == "Kmeasure") return(mmlist) else result$Kmeasure <- mmlist } # what = "all", so return all computed objects return(result) } BartCalc <- function(fY, fK) { Mod(fY)^2 * fK } Kest.fft <- function(X, sigma, r=NULL, ..., breaks=NULL) { verifyclass(X, "ppp") W <- Window(X) lambda <- npoints(X)/area(W) rmaxdefault <- rmax.rule("K", W, lambda) bk <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) breaks <- bk$val rvalues <- bk$r u <- Kmeasure(X, sigma, ...) xx <- rasterx.im(u) yy <- rastery.im(u) rr <- sqrt(xx^2 + yy^2) tr <- whist(rr, breaks, u$v) K <- cumsum(tr) * with(u, xstep * ystep) rmax <- min(rr[is.na(u$v)]) K[rvalues >= rmax] <- NA result <- data.frame(r=rvalues, theo=pi * rvalues^2, border=K) w <- X$window alim <- c(0, min(diff(w$xrange), diff(w$yrange))/4) out <- fv(result, "r", quote(K(r)), "border", . ~ r, alim, c("r", "%s[pois](r)", "hat(%s)[fb](r)"), c("distance argument r", "theoretical Poisson %s", "border-corrected FFT estimate of %s"), fname="K", unitname=unitname(X) ) return(out) } spatstat.explore/R/bw.CvLHeat.R0000644000176200001440000000146514611073311016016 0ustar liggesusers#' #' bw.CvLHeat #' #' Cronie-van Lieshout bandwidth selection for Diffusion smoothing #' #' Copyright (c) 2020 Adrian Baddeley, Tilman Davies and Suman Rakshit #' GNU Public Licence >= 2.0 bw.CvLHeat <- function(X, ..., srange=NULL, ns=16, sigma=NULL, leaveoneout=TRUE, verbose=TRUE) { #' compute intensity estimates b <- HeatEstimates.ppp(X, ..., srange=srange, ns=ns, sigma=sigma, leaveoneout=leaveoneout, verbose=verbose) lambda <- b$lambda h <- b$h hname <- b$hname #' compute Cronie-van Lieshout criterion AW <- area.owin(Window(X)) CV <- (rowSums(1/lambda) - AW)^2 iopt <- which.min(CV) result <- bw.optim(CV, h, iopt, criterion="Cronie-van Lieshout criterion", hname=hname) return(result) } spatstat.explore/R/Fest.R0000644000176200001440000001427514611073307015032 0ustar liggesusers# # Fest.R # # Computes estimates of the empty space function # # $Revision: 4.47 $ $Date: 2019/11/01 01:32:28 $ # Fhazard <- function(X, ...) { Z <- Fest(X, ...) 
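  ## (the hazard rate is extracted from the Kaplan-Meier estimate,
  ##  so Fest must be called with a correction that includes "km")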
if(!any(names(Z) == "km")) stop("Kaplan-Meier estimator 'km' is required for hazard rate") conserve <- attr(Z, "conserve") ## strip off Poisson F Z <- Z[, (colnames(Z) != "theo")] ## relabel the fv object Z <- rebadge.fv(Z, new.ylab=quote(h(r)), new.fname="h", tags=c("hazard", "theohaz"), new.tags=c("hazard", "theo"), new.labl=c("hat(%s)[km](r)", "%s[pois](r)"), new.desc=c( "Kaplan-Meier estimate of %s", "theoretical Poisson %s"), new.dotnames=c("hazard", "theo"), new.preferred="hazard") ## strip off unwanted bits Z <- Z[, c("r", "hazard", "theo")] attr(Z, "conserve") <- conserve return(Z) } Fest <- function(X, ..., eps = NULL, r=NULL, breaks=NULL, correction=c("rs", "km", "cs"), domain=NULL) { verifyclass(X, "ppp") if(!is.null(domain)) stopifnot(is.subset.owin(domain, Window(X))) rorbgiven <- !is.null(r) || !is.null(breaks) checkspacing <- !isFALSE(list(...)$checkspacing) testme <- isTRUE(list(...)$testme) ## Intensity estimate W <- Window(X) npts <- npoints(X) lambda <- npts/area(W) ## Discretise window dwin <- as.mask(W, eps=eps) dX <- ppp(X$x, X$y, window=dwin, check=FALSE) ## Histogram breakpoints rmaxdefault <- rmax.rule("F", dwin, lambda) breaks <- handle.r.b.args(r, breaks, dwin, eps, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max if(testme || (rorbgiven && checkspacing)) check.finespacing(rvals, if(is.null(eps)) NULL else eps/4, dwin, rmaxdefault=rmaxdefault, action="fatal", rname="r", context="in Fest(X, r)") ## choose correction(s) if(is.null(correction)) { correction <- c("rs", "km", "cs") } else correction <- pickoption("correction", correction, c(none="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", cs="cs", ChiuStoyan="cs", Hanisch="cs", han="cs", best="km"), multi=TRUE) ## initialise fv object df <- data.frame(r=rvals, theo=1-exp(-lambda * pi * rvals^2)) Z <- fv(df, "r", substitute(F(r), NULL), "theo", . ~ r, c(0,rmax), c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="F") nr <- length(rvals) zeroes <- numeric(nr) ## compute distances and censoring distances if(X$window$type == "rectangle") { ## original data were in a rectangle ## output of exactdt() is sufficient e <- exactdt(dX) dist <- e$d bdry <- e$b if(!is.null(domain)) { ok <- inside.owin(raster.xy(e$w), , domain) dist <- dist[ok] bdry <- bdry[ok] } } else { ## window is irregular.. # Distance transform & boundary distance for all pixels e <- exactdt(dX) b <- bdist.pixels(dX$window, style="matrix") ## select only those pixels inside mask mm <- dwin$m if(!is.null(domain)) { ok <- inside.owin(raster.xy(e$w), , domain) mm <- as.vector(mm) & ok } dist <- e$d[mm] bdry <- b[mm] } ## censoring indicators d <- (dist <= bdry) ## observed distances o <- pmin.int(dist, bdry) ## start calculating estimates of F if("none" %in% correction) { ## UNCORRECTED e.d.f. 
of empty space distances if(npts == 0) edf <- zeroes else { hh <- hist(dist[dist <= rmax],breaks=breaks$val,plot=FALSE)$counts edf <- cumsum(hh)/length(dist) } Z <- bind.fv(Z, data.frame(raw=edf), "hat(%s)[raw](r)", "uncorrected estimate of %s", "raw") } if("cs" %in% correction) { ## Chiu-Stoyan correction if(npts == 0) cs <- zeroes else { ## uncensored distances x <- dist[d] ## weights a <- eroded.areas(W, rvals) ## calculate Hanisch estimator h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts H <- cumsum(h/a) cs <- H/max(H[is.finite(H)]) } ## add to fv object Z <- bind.fv(Z, data.frame(cs=cs), "hat(%s)[cs](r)", "Chiu-Stoyan estimate of %s", "cs") } if(any(correction %in% c("rs", "km"))) { ## calculate Kaplan-Meier and/or border corrected (Reduced Sample) estimators want.rs <- "rs" %in% correction want.km <- "km" %in% correction selection <- c(want.rs, want.km, want.km, want.km) tags <- c("rs", "km", "hazard", "theohaz")[selection] labels <- c("hat(%s)[bord](r)", "hat(%s)[km](r)", "hat(h)[km](r)", "h[pois](r)")[selection] descr <- c("border corrected estimate of %s", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function h(r)", "theoretical Poisson hazard h(r)")[selection] if(npts == 0) { result <- as.data.frame(matrix(0, nr, length(tags))) names(result) <- tags } else { result <- km.rs.opt(o, bdry, d, breaks, KM=want.km, RS=want.rs) result$theohaz <- 2 * pi * lambda * rvals result <- as.data.frame(result[tags]) } ## add to fv object Z <- bind.fv(Z, result, labels, descr, if(want.km) "km" else "rs") } ## wrap up unitname(Z) <- unitname(X) ## remove 'hazard' from the dotnames nama <- names(Z) fvnames(Z, ".") <- rev(setdiff(nama, c("r", "hazard", "theohaz"))) ## determine recommended plot range attr(Z, "alim") <- with(Z, range(.x[is.finite(.y) & .y <= 0.9])) ## arguments to be used in envelope, etc attr(Z, "conserve") <- list(checkspacing=FALSE) return(Z) } spatstat.explore/R/bw.frac.R0000644000176200001440000000142714611073307015446 0ustar liggesusers#' #' bw.frac.R #' #' $Revision: 1.2 $ $Date: 2020/12/19 05:25:06 $ bw.frac <- function(X, ..., f=1/4) { X <- as.owin(X) g <- distcdf(X, ...) r <- with(g, .x) Fr <- with(g, .y) iopt <- min(which(Fr >= f)) ropt <- r[iopt] attr(ropt, "f") <- f attr(ropt, "g") <- g class(ropt) <- c("bw.frac", class(ropt)) return(ropt) } print.bw.frac <- function(x, ...) { print(as.numeric(x), ...) } plot.bw.frac <- function(x, ...) { xname <- short.deparse(substitute(x)) g <- attr(x, "g") f <- attr(x, "f") ropt <- as.numeric(x) dont.complain.about(g) do.call(plot, resolve.defaults(list(quote(g)), list(...), list(main=xname))) abline(v=ropt, lty=3) abline(h=f, lty=3) invisible(NULL) } spatstat.explore/R/Math.fasp.R0000644000176200001440000000377314611073307015753 0ustar liggesusers## ## Math.fv.R ## ## Inline arithmetic for 'fasp' ## ## $Revision: 1.4 $ $Date: 2023/05/13 01:11:06 $ Math.fasp <- function(x, ...){ force(x) eval(substitute(eval.fasp(G(x)), list(G=as.name(.Generic), x=quote(x)))) } Complex.fasp <- function(z){ force(z) eval(substitute(eval.fasp(G(z)), list(G=as.name(.Generic), z=quote(z)))) } Ops.fasp <- function(e1,e2=NULL) { m <- match.call() objects <- list() if(is.name(m$e1) || (is.atomic(m$e1) && length(m$e1) == 1)) { ## e1 is the name of a fasp object, or is a single value. 
## It will appear directly in the resulting function names e1use <- substitute(e1) } else { ## e1 is an expression that should first be evaluated ## It will appear as 'e1' in the resulting function names e1use <- quote(e1) objects$e1 <- eval(e1) } if(is.name(m$e2) || (is.atomic(m$e2) && length(m$e2) == 1)) { e2use <- substitute(e2) } else { e2use <- quote(e2) objects$e2 <- eval(e2) } callframe <- parent.frame() evalframe <- if(length(objects)) list2env(objects, parent=callframe) else callframe eval(substitute(eval.fasp(G(e1,e2), envir=evalframe), list(G=as.name(.Generic), e1=e1use, e2=e2use))) } Summary.fasp <- local({ Summary.fasp <- function(..., na.rm=FALSE){ argh <- list(...) arrays <- sapply(argh, inherits, what="fasp") argh[arrays] <- lapply(argh[arrays], processArray, op=.Generic, na.rm=na.rm) funs <- sapply(argh, is.fv) if(any(funs)) argh[funs] <- lapply(argh[funs], .Generic, na.rm=na.rm) do.call(.Generic, c(argh, list(na.rm = na.rm))) } processArray <- function(x, op, na.rm=FALSE) { ## extract individual fv objects and apply operation 'op' y <- unlist(lapply(x$fns, op, na.rm=na.rm)) ## apply 'op' to the results do.call(op, c(y, list(na.rm=na.rm))) } Summary.fasp }) spatstat.explore/R/fasp.R0000644000176200001440000001023614611073310015045 0ustar liggesusers# # fasp.R # # $Revision: 1.37 $ $Date: 2023/04/05 07:06:22 $ # # #----------------------------------------------------------------------------- # # creator fasp <- function(fns, which, formulae=NULL, dataname=NULL, title=NULL, rowNames=NULL, colNames=NULL, checkfv=TRUE) { stopifnot(is.list(fns)) stopifnot(is.matrix(which)) stopifnot(length(fns) == length(which)) n <- length(which) if(checkfv && !all(sapply(fns, is.fv))) stop("Some entries of 'fns' are not objects of class 'fv'", call.=FALSE) fns <- as.anylist(fns) # set row and column labels if(!is.null(rowNames)) rownames(which) <- rowNames if(!is.null(colNames)) colnames(which) <- colNames if(!is.null(formulae)) { # verify format and convert to character vector formulae <- FormatFaspFormulae(formulae, "formulae") # ensure length matches length of "fns" if(length(formulae) == 1L && n > 1L) # single formula - replicate it formulae <- rep.int(formulae, n) else stopifnot(length(formulae) == length(which)) } rslt <- list(fns=fns, which=which, default.formula=formulae, dataname=dataname, title=title) class(rslt) <- "fasp" return(rslt) } # subset extraction operator "[.fasp" <- function(x, I, J, drop=TRUE, ...) 
{ verifyclass(x, "fasp") m <- nrow(x$which) n <- ncol(x$which) if(missing(I)) I <- 1:m if(missing(J)) J <- 1:n if(!is.vector(I) || !is.vector(J)) stop("Subset operator is only implemented for vector indices") # determine index subset for lists 'fns', 'titles' etc included <- rep.int(FALSE, length(x$fns)) w <- as.vector(x$which[I,J]) if(length(w) == 0) stop("result is empty") included[w] <- TRUE # if only one cell selected, and drop=TRUE: if((sum(included) == 1L) && drop) return(x$fns[included][[1L]]) # determine positions in shortened lists whichIJ <- x$which[I,J,drop=FALSE] newk <- cumsum(included) newwhich <- matrix(newk[whichIJ], ncol=ncol(whichIJ), nrow=nrow(whichIJ)) rownames(newwhich) <- rownames(x$which)[I] colnames(newwhich) <- colnames(x$which)[J] # default plotting formulae - could be NULL deform <- x$default.formula # create new fasp object Y <- fasp(fns = x$fns[included], formulae = if(!is.null(deform)) deform[included] else NULL, which = newwhich, dataname = x$dataname, title = x$title) return(Y) } dim.fasp <- function(x) { dim(x$which) } # print method print.fasp <- function(x, ...) { verifyclass(x, "fasp") cat(paste("Function array (class", sQuote("fasp"), ")\n")) dim <- dim(x$which) cat(paste("Dimensions: ", dim[1L], "x", dim[2L], "\n")) cat(paste("Title:", if(is.null(x$title)) "(None)" else x$title, "\n")) invisible(NULL) } # other methods as.fv.fasp <- function(x) do.call(cbind.fv, x$fns) dimnames.fasp <- function(x) { return(dimnames(x$which)) } "dimnames<-.fasp" <- function(x, value) { w <- x$which dimnames(w) <- value x$which <- w return(x) } ## other functions FormatFaspFormulae <- local({ zapit <- function(x, argname) { if(inherits(x, "formula")) deparse(x) else if(is.character(x)) x else stop(paste("The entries of", sQuote(argname), "must be formula objects or strings")) } FormatFaspFormulae <- function(f, argname) { ## f should be a single formula object, a list of formula objects, ## a character vector, or a list containing formulae and strings. ## It will be converted to a character vector. 
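    ## (formula objects are deparsed to strings so that all entries share a
    ##  common character representation)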
result <- if(is.character(f)) f else if(inherits(f, "formula")) deparse(f) else if(is.list(f)) unlist(lapply(f, zapit, argname=argname)) else stop(paste(sQuote(argname), "should be a formula, a list of formulae,", "or a character vector")) return(result) } FormatFaspFormulae }) spatstat.explore/R/metriccontact.R0000644000176200001440000000627614611073310016764 0ustar liggesusers#' #' metriccontact.R #' #' Metric contact distribution #' (corresponding distance transforms are defined in 'metricPdt.R') #' #' $Revision: 1.1 $ $Date: 2020/11/29 07:41:37 $ rectcontact <- function(X, ..., asp=1.0, npasses=4, eps=NULL, r=NULL, breaks=NULL, correction=c("rs", "km")) { verifyclass(X, "im") rorbgiven <- !is.null(r) || !is.null(breaks) checkspacing <- !isFALSE(list(...)$checkspacing) testme <- isTRUE(list(...)$testme) check.1.real(asp) stopifnot(asp > 0) if(X$type != "logical") stop("X should be a logical-valued image") if(!missing(eps)) X <- as.im(X, eps=eps) W <- as.mask(X) # the region that is defined Y <- solutionset(X) # the region that is TRUE fullframe <- all(W$m) emptyframe <- !any(W$m) ## histogram breakpoints rmaxdefault <- rmax.rule("F", W) breaks <- handle.r.b.args(r, breaks, W, eps, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max if(testme || (rorbgiven && checkspacing)) check.finespacing(rvals, if(is.null(eps)) NULL else eps/4, W, rmaxdefault=if(rorbgiven) NULL else rmaxdefault, action="fatal", rname="r", context="in rectcontact(X, r)") correction <- pickoption("correction", correction, c(border="rs", rs="rs", KM="km", km="km", Kaplan="km", best="km"), multi=TRUE) ## compute distances and censoring distances if(!emptyframe) { dist <- rectdistmap(Y, asp, npasses=npasses) if(fullframe) { bdry <- attr(dist, "bdist") } else { bdry <- rectdistmap(complement.owin(W), asp, npasses=npasses) } #' extract corresponding values dist <- dist[W, drop=TRUE, rescue=FALSE] bdry <- bdry[W, drop=TRUE, rescue=FALSE] ## censoring indicators d <- (dist <= bdry) ## observed distances o <- pmin.int(dist, bdry) } ## calculate Kaplan-Meier and/or border corrected (Reduced Sample) estimators want.rs <- "rs" %in% correction want.km <- "km" %in% correction selection <- c(want.rs, want.km) tags <- c("rs", "km")[selection] labels <- c("hat(%s)[bord](r)", "hat(%s)[km](r)")[selection] descr <- c("border corrected estimate of %s", "Kaplan-Meier estimate of %s")[selection] if(emptyframe) { df <- as.data.frame(matrix(0, length(rvals), length(tags))) names(df) <- tags } else { df <- km.rs.opt(o, bdry, d, breaks, KM=want.km, RS=want.rs) df <- as.data.frame(df[tags]) } ## create fv object df <- cbind(data.frame(r=rvals), df) Z <- fv(df, "r", quote(H(r)), if(want.km) "km" else "rs", . 
~ r, c(0,rmax), c("r", labels), c("distance argument r", descr), fname="H") fvnames(Z, ".") <- rev(fvnames(Z, ".")) attr(Z, "alim") <- with(Z, range(.x[is.finite(.y) & .y <= 0.95])) attr(Z, "conserve") <- list(checkspacing=FALSE) return(Z) } spatstat.explore/R/eval.fasp.R0000644000176200001440000000561214611073310015775 0ustar liggesusers# # eval.fasp.R # # # eval.fasp() Evaluate expressions involving fasp objects # # compatible.fasp() Check whether two fasp objects are compatible # # $Revision: 1.13 $ $Date: 2023/03/18 10:25:19 $ # eval.fasp <- local({ eval.fasp <- function(expr, envir, dotonly=TRUE) { #' convert syntactic expression to 'expression' object e <- as.expression(substitute(expr)) #' convert syntactic expression to call ## elang <- substitute(expr) #' find names of all variables in the expression varnames <- all.vars(e) if(length(varnames) == 0) stop("No variables in this expression") ## get the actual variables if(missing(envir)) { envir <- sys.parent() } else if(is.list(envir)) { envir <- list2env(envir, parent=parent.frame()) } vars <- lapply(as.list(varnames), get, envir=envir) names(vars) <- varnames ## find out which ones are fasp objects isfasp <- unlist(lapply(vars, inherits, what="fasp")) if(!any(isfasp)) stop("No fasp objects in this expression") fasps <- vars[isfasp] nfasps <- length(fasps) ## test whether the fasp objects are compatible if(nfasps > 1L && !(do.call(compatible, unname(fasps)))) stop(paste(if(nfasps > 2) "some of" else NULL, "the objects", commasep(sQuote(names(fasps))), "are not compatible")) ## copy first object as template result <- fasps[[1L]] which <- result$which nr <- nrow(which) nc <- ncol(which) ## create environment for evaluation fenv <- new.env() ## for each [i,j] extract fv objects and evaluate expression for(i in seq_len(nr)) for(j in seq_len(nc)) { ## extract fv objects at position [i,j] funs <- lapply(fasps, getpanel, i=i, j=j) ## insert into list of argument values vars[isfasp] <- funs ## assign them into the right environment for(k in seq_along(vars)) assign(varnames[k], vars[[k]], envir=fenv) ## evaluate resultij <- eval(substitute(eval.fv(ee,ff,dd), list(ee=e[[1]], ff=fenv, dd=dotonly))) ## insert back into fasp result$fns[[which[i,j] ]] <- resultij } result$title <- paste("Result of eval.fasp(", e, ")", sep="") return(result) } getpanel <- function(x, i, j) { as.fv(x[i,j]) } eval.fasp }) compatible.fasp <- function(A, B, ...) 
{ verifyclass(A, "fasp") if(missing(B)) return(TRUE) verifyclass(B, "fasp") dimA <- dim(A$which) dimB <- dim(B$which) if(!all(dimA == dimB)) return(FALSE) for(i in seq_len(dimA[1L])) for(j in seq_len(dimA[2L])) { Aij <- as.fv(A[i,j]) Bij <- as.fv(B[i,j]) if(!compatible.fv(Aij, Bij)) return(FALSE) } # A and B agree if(length(list(...)) == 0) return(TRUE) # recursion return(compatible.fasp(B, ...)) } spatstat.explore/R/bw.scott.R0000644000176200001440000000132614611073307015665 0ustar liggesusers#' #' bw.scott.R #' #' Bandwidth selection rule bw.scott for point patterns in any dimension #' #' $Revision: 1.1 $ $Date: 2019/07/22 11:41:41 $ bw.scott <- function(X, isotropic=FALSE, d=NULL) { stopifnot(is.ppp(X) || is.lpp(X) || is.pp3(X) || is.ppx(X)) if(is.null(d)) { d <- spatdim(X, intrinsic=FALSE) } else check.1.integer(d) nX <- npoints(X) cX <- coords(X, spatial=TRUE, temporal=FALSE, local=FALSE) sdX <- apply(cX, 2, sd) if(isotropic) { #' geometric mean sdX <- exp(mean(log(pmax(sdX, .Machine$double.eps)))) } b <- sdX * nX^(-1/(d+4)) names(b) <- if(isotropic) "sigma" else paste0("sigma.", colnames(cX)) return(b) } bw.scott.iso <- function(X) { bw.scott(X, isotropic=TRUE) } spatstat.explore/R/pcfinhom.R0000644000176200001440000001313114700374620015723 0ustar liggesusers# # pcfinhom.R # # $Revision: 1.28 $ $Date: 2023/03/11 06:04:23 $ # # inhomogeneous pair correlation function of point pattern # # pcfinhom <- function(X, lambda=NULL, ..., r=NULL, kernel="epanechnikov", bw=NULL, adjust.bw=1, stoyan=0.15, correction=c("translate", "Ripley"), divisor=c("r","d"), renormalise=TRUE, normpower=1, update=TRUE, leaveoneout=TRUE, reciplambda=NULL, sigma=NULL, adjust.sigma=1, varcov=NULL, close=NULL) { verifyclass(X, "ppp") # r.override <- !is.null(r) win <- X$window areaW <- area(win) npts <- npoints(X) kernel <- match.kernel(kernel) correction.given <- !missing(correction) correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", good="good", best="best"), multi=TRUE) if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) correction <- implemented.for.K(correction, win$type, correction.given) divisor <- match.arg(divisor) if(is.null(bw) && kernel=="epanechnikov") { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(npts/areaW) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(npts/areaW) } ########## intensity values ######################### a <- resolve.reciplambda(X, lambda=lambda, reciplambda=reciplambda, ..., sigma=sigma, adjust=adjust.sigma, varcov=varcov, leaveoneout=leaveoneout, update=update, check=TRUE) reciplambda <- a$reciplambda lambda <- a$lambda danger <- a$danger dangerous <- a$dangerous # renormalise if(renormalise && npts > 0) { check.1.real(normpower) stopifnot(normpower %in% 1:2) renorm.factor <- (areaW/sum(reciplambda))^normpower } ########## r values ############################ # handle arguments r and breaks rmaxdefault <- rmax.rule("K", win, lambda) breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) ########## 
smoothing parameters for pcf ############################ # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw, adjust=adjust.bw), list(...), list(n=length(r), from=0, to=rmax)) ################################################# # compute pairwise distances if(npts > 1) { if(is.null(close)) { #' find close pairs close <- closepairs(X, rmax+hmax) } else { #' check 'close' has correct format needed <- c("i", "j", "xi", "yi", "xj", "yj", "dx", "dy", "d") if(any(is.na(match(needed, names(close))))) stop(paste("Argument", sQuote("close"), "should have components named", commasep(sQuote(needed))), call.=FALSE) } dIJ <- close$d I <- close$i J <- close$j XI <- ppp(close$xi, close$yi, window=win, check=FALSE) wIJ <- reciplambda[I] * reciplambda[J] } else { undefined <- rep(NaN, length(r)) } # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) out <- fv(df, "r", quote(g[inhom](r)), "theo", , alim, c("r","{%s[%s]^{pois}}(r)"), c("distance argument r", "theoretical Poisson %s"), fname=c("g", "inhom")) ###### compute ####### if(any(correction=="translate")) { # translation correction if(npts > 1) { XJ <- ppp(close$xj, close$yj, window=win, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) gT <- sewpcf(dIJ, edgewt * wIJ, denargs, areaW, divisor)$g if(renormalise) gT <- gT * renorm.factor } else gT <- undefined out <- bind.fv(out, data.frame(trans=gT), "{hat(%s)[%s]^{Trans}}(r)", "translation-corrected estimate of %s", "trans") } if(any(correction=="isotropic")) { # Ripley isotropic correction if(npts > 1) { edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) gR <- sewpcf(dIJ, edgewt * wIJ, denargs, areaW, divisor)$g if(renormalise) gR <- gR * renorm.factor } else gR <- undefined out <- bind.fv(out, data.frame(iso=gR), "{hat(%s)[%s]^{Ripley}}(r)", "isotropic-corrected estimate of %s", "iso") } # sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } # which corrections have been computed? corrxns <- rev(setdiff(names(out), "r")) # default is to display them all formula(out) <- . ~ r fvnames(out, ".") <- corrxns unitname(out) <- unitname(X) if(danger) attr(out, "dangerous") <- dangerous return(out) } spatstat.explore/R/scriptUtils.R0000644000176200001440000000374614733462621016465 0ustar liggesusers## scriptUtils.R ## $Revision: 1.13 $ $Date: 2024/12/27 08:27:27 $ ## slick way to use precomputed data ## If the named file exists, it is loaded, giving access to the data. ## Otherwise, 'expr' is evaluated, and all objects created ## are saved in the designated file, for loading next time. reload.or.compute <- function(filename, expr, objects=NULL, context=parent.frame(), destination=parent.frame(), force=FALSE, verbose=TRUE, exclude=NULL) { stopifnot(is.character(filename) && length(filename) == 1) if(force || !file.exists(filename)) { if(verbose) splat("Recomputing...") ## evaluate 'expr' in a fresh environment .Expr <- ee <- as.expression(substitute(expr)) en <- new.env(parent=context) assign(".Expr", ee, pos=en) local(eval(.Expr), envir=en) ## default is to save all objects that were created if(is.null(objects)) objects <- ls(envir=en) ## exclude specified objects? 
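    ## (objects named in 'exclude' are not saved, and are not expected
    ##  to be present when reloading)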
objects <- setdiff(objects, exclude) ## save them in the designated file save(list=objects, file=filename, compress=TRUE, envir=en) ## assign them into the parent frame for(i in seq_along(objects)) assign(objects[i], get(objects[i], envir=en), envir=destination) result <- objects } else { if(verbose) splat("Reloading from", sQuote(filename), "saved at", file.mtime(filename)) result <- load(filename, envir=destination) ## expect to find all objects listed in 'objects' and not excluded objects <- setdiff(objects, exclude) if(!all(ok <- (objects %in% result))) { nbad <- sum(!ok) warning(paste(ngettext(nbad, "object", "objects"), commasep(sQuote(objects[!ok])), ngettext(nbad, "was", "were"), "not present in data file", dQuote(filename)), call.=FALSE) } } return(invisible(result)) } spatstat.explore/R/kernel2d.R0000644000176200001440000001054014611073310015620 0ustar liggesusers#' #' kernel2d.R #' #' Two-dimensional smoothing kernels #' #' $Revision: 1.16 $ $Date: 2022/04/17 01:13:46 $ #' .Spatstat.2D.KernelTable <- list( #' table entries: #' d = density of standardised kernel #' sd = standard deviation of x coordinate, for standardised kernel #' hw = halfwidth of support of standardised kernel gaussian=list( d = function(x,y, ...) { dnorm(x) * dnorm(y) }, sd = 1, hw = 8, symmetric = TRUE), epanechnikov=list( d = function(x,y, ...) { (2/pi) * pmax(1 - (x^2+y^2), 0) }, sd = 1/sqrt(6), hw = 1, symmetric = TRUE), quartic=list( d = function(x,y, ...) { (3/pi) * pmax(1 - (x^2+y^2), 0)^2 }, sd = 1/sqrt(8), hw = 1, symmetric = TRUE), disc=list( d = function(x,y,...) { (1/pi) * as.numeric(x^2 + y^2 <= 1) }, sd = 1/2, hw = 1, symmetric = TRUE) ) validate2Dkernel <- function(kernel, fatal=TRUE) { if(is.character(match2DkernelName(kernel))) return(TRUE) if(is.im(kernel) || is.function(kernel)) return(TRUE) if(!fatal) return(FALSE) if(is.character(kernel)) stop(paste("Unrecognised choice of kernel", sQuote(kernel), paren(paste("options are", commasep(sQuote(names(.Spatstat.2D.KernelTable)))))), call.=FALSE) stop(paste("kernel should be a character string,", "a pixel image, or a function (x,y)"), call.=FALSE) } match2DkernelName <- function(kernel) { if(!is.character(kernel) || length(kernel) != 1) return(NULL) nama <- names(.Spatstat.2D.KernelTable) m <- pmatch(kernel, nama) if(is.na(m)) return(NULL) return(nama[m]) } lookup2DkernelInfo <- function(kernel) { validate2Dkernel(kernel) kernel <- match2DkernelName(kernel) if(is.null(kernel)) return(NULL) return(.Spatstat.2D.KernelTable[[kernel]]) } evaluate2Dkernel <- function(kernel, x, y, sigma=NULL, varcov=NULL, ..., scalekernel=is.character(kernel)) { info <- lookup2DkernelInfo(kernel) if(scalekernel) { ## kernel adjustment factor sdK <- if(is.character(kernel)) info$sd else 1 ## transform coordinates to x',y' such that kerfun(x', y') ## yields density k(x,y) at desired bandwidth if(is.null(varcov)) { rr <- sdK/sigma x <- x * rr y <- y * rr scalefactor <- rr^2 } else { SinvH <- matrixinvsqrt(varcov) rSinvH <- sdK * SinvH XY <- cbind(x, y) %*% rSinvH x <- XY[,1L] y <- XY[,2L] scalefactor <- det(rSinvH) } } ## now evaluate kernel if(is.character(kernel)) { kerfun <- info$d result <- kerfun(x, y) } else if(is.function(kernel)) { argh <- list(...) 
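    ## pass on only those extra arguments that the user-supplied kernel accepts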
if(length(argh) > 0) argh <- argh[names(argh) %in% names(formals(kernel))] result <- do.call(kernel, append(list(x, y), argh)) if(anyNA(result)) stop("NA values returned from kernel function") if(length(result) != length(x)) stop("Kernel function returned the wrong number of values") } else if(is.im(kernel)) { result <- kernel[list(x=x, y=y)] if(anyNA(result) || length(result) != length(x)) stop("Domain of kernel image is not large enough") } else stop("Unrecognised format for kernel") if(scalekernel) result <- scalefactor * result return(result) } cutoff2Dkernel <- function(kernel, sigma=NULL, varcov=NULL, ..., scalekernel=is.character(kernel), cutoff=NULL, fatal=FALSE) { info <- lookup2DkernelInfo(kernel) ## if scalekernel = FALSE, 'cutoff' is an absolute distance ## if scalekernel = TRUE, 'cutoff' is expressed in number of s.d. if(scalekernel) { if(is.null(cutoff)) { ## template kernel's standard deviation sdK <- info$sd %orifnull% 1 ## template kernel's halfwidth hwK <- info$hw %orifnull% 8 ## cutoff for kernel with sd=1 cutoff <- hwK/sdK } ## required standard deviation if(!is.null(sigma)) { sig <- sigma } else if(!is.null(varcov)) { lam <- eigen(varcov)$values sig <- sqrt(max(lam)) } else stop("Cannot determine standard deviation") ## cutoff <- cutoff * sig } if(fatal && is.null(cutoff)) stop(paste("The argument", sQuote("cutoff"), "is required", "when the kernel is a user-defined function, and scalekernel=FALSE"), call.=FALSE) return(cutoff) } spatstat.explore/R/bermantest.R0000644000176200001440000002127314611073307016271 0ustar liggesusers# # bermantest.R # # Test statistics from Berman (1986) # # $Revision: 1.26 $ $Date: 2023/06/20 02:43:35 $ # # berman.test <- function(...) { UseMethod("berman.test") } berman.test.ppp <- function(X, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) { Xname <- short.deparse(substitute(X)) covname <- short.deparse(substitute(covariate)) force(covariate) if(is.character(covariate)) covname <- covariate which <- match.arg(which) alternative <- match.arg(alternative) fitcsr <- exactppm(X) dont.complain.about(fitcsr) do.call(bermantestEngine, resolve.defaults(list(quote(fitcsr), quote(covariate), which, alternative), list(...), list(modelname="CSR", covname=covname, dataname=Xname))) } ## Code for berman.test.ppm is moved to spatstat.model bermantestEngine <- function(model, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ..., modelname, covname, dataname="") { csr <- is.poisson(model) && is.stationary(model) if(missing(modelname)) modelname <- if(csr) "CSR" else short.deparse(substitute(model)) if(missing(covname)) { covname <- short.deparse(substitute(covariate)) if(is.character(covariate)) covname <- covariate } which <- match.arg(which) alternative <- match.arg(alternative) if(!is.poisson(model)) stop("Only implemented for Poisson point process models") #' compute required data fram <- spatialCDFframe(model, covariate, ..., modelname=modelname, covname=covname, dataname=dataname) #' evaluate berman test statistic result <- bermantestCalc(fram, which=which, alternative=alternative) return(result) } bermantestCalc <- function(fram, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) 
{ which <- match.arg(which) alternative <- match.arg(alternative) verifyclass(fram, "spatialCDFframe") fvalues <- fram$values info <- fram$info ## values of covariate at data points ZX <- fvalues$ZX ## transformed to Unif[0,1] under H0 U <- fvalues$U ## values of covariate at pixels Zvalues <- fvalues$Zvalues ## corresponding pixel areas/weights weights <- fvalues$weights ## intensity of model lambda <- fvalues$lambda ## names modelname <- info$modelname dataname <- info$dataname covname <- info$covname switch(which, Z1={ #......... Berman Z1 statistic ..................... method <- paste("Berman Z1 test of", if(info$csr) "CSR" else "inhomogeneous Poisson process", "in", info$spacename) # sum of covariate values at data points Sn <- sum(ZX) # predicted mean and variance lamwt <- lambda * weights En <- sum(lamwt) ESn <- sum(lamwt * Zvalues) varSn <- sum(lamwt * Zvalues^2) # working, for plot method working <- list(meanZX=mean(ZX), meanZ=ESn/En) # standardise statistic <- (Sn - ESn)/sqrt(varSn) names(statistic) <- "Z1" p.value <- switch(alternative, two.sided=2 * pnorm(-abs(statistic)), less=pnorm(statistic), greater=pnorm(statistic, lower.tail=FALSE)) altblurb <- switch(alternative, two.sided="two-sided", less="mean value of covariate at random points is less than predicted under model", greater="mean value of covariate at random points is greater than predicted under model") valuename <- paste("covariate", sQuote(paste(covname, collapse="")), "evaluated at points of", sQuote(dataname)) }, Z2={ #......... Berman Z2 statistic ..................... method <- paste("Berman Z2 test of", if(info$csr) "CSR" else "inhomogeneous Poisson process", "in", info$spacename) npts <- length(ZX) statistic <- sqrt(12/npts) * (sum(U) - npts/2) working <- list(meanU=mean(U)) names(statistic) <- "Z2" p.value <- switch(alternative, two.sided=2 * pnorm(-abs(statistic)), less=pnorm(statistic), greater=pnorm(statistic, lower.tail=FALSE)) altblurb <- switch(alternative, two.sided="two-sided", less="covariate values at random points have lower quantiles than predicted under model", greater="covariate values at random points have higher quantiles than predicted under model") valuename <- paste("covariate", sQuote(paste(covname, collapse="")), "evaluated at points of", sQuote(dataname), "\n\t", "and transformed to uniform distribution under", if(info$csr) modelname else sQuote(modelname)) }) out <- list(statistic=statistic, p.value=p.value, alternative=altblurb, method=method, which=which, working=working, data.name=valuename, fram=fram) class(out) <- c("htest", "bermantest") return(out) } plot.bermantest <- function(x, ..., lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2) { fram <- x$fram if(!is.null(fram)) { values <- fram$values info <- fram$info } else { # old style ks <- x$ks values <- attr(ks, "prep") info <- attr(ks, "info") } work <- x$working op <- options(useFancyQuotes=FALSE) on.exit(options(op)) switch(x$which, Z1={ # plot cdf's of Z FZ <- values$FZ xxx <- get("x", environment(FZ)) yyy <- get("y", environment(FZ)) main <- c(x$method, paste("based on distribution of covariate", sQuote(info$covname)), paste("Z1 statistic =", signif(x$statistic, 4)), paste("p-value=", signif(x$p.value, 4))) do.call(plot.default, resolve.defaults( list(x=xxx, y=yyy, type="l"), list(...), list(lwd=lwd0, col=col0, lty=lty0), list(xlab=info$covname, ylab="probability", main=main))) FZX <- values$FZX if(is.null(FZX)) FZX <- ecdf(values$ZX) plot(FZX, add=TRUE, do.points=FALSE, lwd=lwd, col=col, lty=lty) 
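           # vertical lines mark the predicted mean covariate value (meanZ)
           # and the observed mean at the data points (meanZX)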
abline(v=work$meanZ, lwd=lwd0,col=col0, lty=lty0, xpd=FALSE) abline(v=work$meanZX, lwd=lwd,col=col, lty=lty, xpd=FALSE) }, Z2={ # plot cdf of U U <- values$U cdfU <- ecdf(U) main <- c(x$method, paste("based on distribution of covariate", sQuote(info$covname)), paste("Z2 statistic =", signif(x$statistic, 4)), paste("p-value=", signif(x$p.value, 4))) dont.complain.about(cdfU) do.call(plot.ecdf, resolve.defaults( list(quote(cdfU)), list(...), list(do.points=FALSE, asp=1), list(xlim=c(0,1), ylim=c(0,1), pty="s"), list(lwd=lwd, col=col, lty=lty), list(xlab="U", ylab="relative frequency"), list(main=main))) abline(0,1,lwd=lwd0,col=col0,lty=lty0, xpd=FALSE) abline(v=0.5, lwd=lwd0,col=col0,lty=lty0, xpd=FALSE) abline(v=work$meanU, lwd=lwd,col=col,lty=lty, xpd=FALSE) }) options(op) return(invisible(NULL)) } spatstat.explore/R/envelopeArray.R0000644000176200001440000000542214611073310016731 0ustar liggesusers# # envelopeArray.R # # $Revision: 1.2 $ $Date: 2022/01/04 05:30:06 $ # # envelopeArray <- function(X, fun, ..., dataname=NULL,verb=FALSE,reuse=TRUE) { #' if(is.null(dataname)) dataname <- short.deparse(substitute(X)) #' determine function name f.is.name <- is.name(substitute(fun)) fname <- if(f.is.name) paste(as.name(substitute(fun))) else if(is.character(fun)) fun else sQuote("fun") #' determine function to be called if(is.character(fun)) { fun <- get(fun, mode="function") } else if(!is.function(fun)) stop(paste(sQuote("fun"), "should be a function or a character string")) #' Apply function to data pattern, to test it #' and to determine array dimensions, margin labels etc. fX <- do.call.matched(fun, append(list(X), list(...)), matchfirst=TRUE) if(!inherits(fX, "fasp")) stop("function did not return an object of class 'fasp'") d <- dim(fX) witch <- matrix(1:prod(d), nrow=d[1L], ncol=d[2L], dimnames=dimnames(fX)) #' make function that extracts [i,j] entry of result ijfun <- function(X, ..., i=1, j=1, expectdim=d) { fX <- fun(X, ...) 
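## Illustrative usage sketch (not run) for envelopeArray(); the wrapper 'Kall'
## and the 'amacrine' dataset from spatstat.data are assumptions for this sketch:
##   Kall <- function(X, ...) alltypes(X, "K", ...)   # returns a 'fasp' array
##   aE <- envelopeArray(amacrine, Kall)              # envelope for each K_{ij} panel
##   plot(aE)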
if(!inherits(fX, "fasp")) stop("function did not return an object of class 'fasp'") if(!all(dim(fX) == expectdim)) stop("function returned an array with different dimensions") return(fX[i,j]) } # ------------ start computing ------------------------------- if(reuse) { L <- do.call(spatstat.explore::envelope, resolve.defaults( list(quote(X), fun=ijfun), list(internal=list(eject="patterns")), list(...), list(verbose=verb))) intern <- attr(L, "internal") } else intern <- L <- NULL # compute function array and build up 'fasp' object fns <- list() k <- 0 for(i in 1:nrow(witch)) { for(j in 1:ncol(witch)) { if(verb) cat("i =",i,"j =",j,"\n") currentfv <- do.call(spatstat.explore::envelope, resolve.defaults( list(quote(X), ijfun), list(simulate=L, internal=intern), list(verbose=FALSE), list(...), list(Yname=dataname), list(i=i, j=j))) k <- k+1 fns[[k]] <- as.fv(currentfv) } } # wrap up into 'fasp' object title <- paste("array of envelopes of", fname, "for", dataname) rslt <- fasp(fns, which=witch, formulae=NULL, dataname=dataname, title=title, checkfv=FALSE) return(rslt) } spatstat.explore/R/ppqq.R0000644000176200001440000000737214611073310015104 0ustar liggesusers## ## ppqq.R ## ## P-P and Q-Q versions of fv objects ## PPversion <- local({ PPversion <- function(f, theo="theo", columns=".") { if(!any(colnames(f) == theo)) stop(paste(sQuote(theo), "is not the name of a column of f")) ## set up inverse theoretical function f_0: 'theo' |-> 'r' xname <- fvnames(f, ".x") df <- as.data.frame(f) theo.table <- df[,theo] x.table <- df[,xname] invfun <- approxfun(x=theo.table, y=x.table, rule=1) ## evaluate f_0^{-1}(theo) for evenly-spaced grid of 'theo' values ra <- range(theo.table) theo.seq <- seq(from=ra[1], to=ra[2], length.out=nrow(df)) x.vals <- invfun(theo.seq) ## convert f to a function and evaluate at these 'r' values ynames <- setdiff(fvnames(f, columns), theo) ff <- as.function(f, value=ynames) y.vals <- lapply(ynames, evalselected, x=x.vals, f=ff) ## build data frame all.vals <- append(list(theo=theo.seq), y.vals) names(all.vals) <- c(theo, ynames) DF <- as.data.frame(all.vals) ## set up fv object atr <- attributes(f) cnames <- colnames(f) i.theo <- match(theo, cnames) i.yval <- match(ynames, cnames) ii <- c(i.theo, i.yval) old.best <- fvnames(f, ".y") best <- if(old.best %in% ynames) old.best else ynames[length(ynames)] result <- fv(DF, argu = theo, ylab = atr$ylab, valu = best, fmla = . 
~ .x, alim = ra, labl = atr$labl[ii], desc = atr$desc[ii], unitname = NULL, fname = atr$fname, yexp = atr$yexp) fvnames(result, ".") <- c(ynames, theo) return(result) } evalselected <- function(what, f, x){ f(x, what=what) } PPversion }) QQversion <- function(f, theo="theo", columns=".") { if(!any(colnames(f) == theo)) stop(paste(sQuote(theo), "is not the name of a column of f")) ## extract relevant columns of data xname <- fvnames(f, ".x") ynames <- fvnames(f, columns) df <- as.data.frame(f) theo.table <- df[,theo] x.table <- df[,xname] y.table <- df[,ynames, drop=FALSE] ## set up inverse theoretical function f_0: 'theo' |-> 'r' invfun <- approxfun(x=theo.table, y=x.table, rule=1) ## apply f_0^{-1} to tabulated function values z.table <- as.data.frame(lapply(y.table, invfun)) ## build data frame DF <- cbind(df[,xname,drop=FALSE], z.table) ## set up fv object atr <- attributes(f) cnames <- colnames(f) i.x <- match(xname, cnames) i.y <- match(ynames, cnames) ii <- c(i.x, i.y) old.best <- fvnames(f, ".y") best <- if(old.best %in% ynames) old.best else ynames[length(ynames)] if(versionstring.spatstat() < package_version("1.38-2")) { fvl <- fvlabels(f, expand=TRUE) theo.string <- fvl[colnames(f) == theo] } else { theo.string <- fvlabels(f, expand=TRUE)[[theo]] } ## remove '(r)' from outer function theo.string <- sub(paren(xname), "", theo.string, fixed=TRUE) theo.expr <- parse(text=theo.string) theo.lang <- theo.expr[[1]] ylab <- substitute({{THEO}^{-1}}(FUN), list(FUN=atr$ylab, THEO=theo.lang)) yexp <- substitute({{THEO}^{-1}}(FUN), list(FUN=atr$yexp, THEO=theo.lang)) oldlabl <- atr$labl labl.iy <- sprintf("{{%s}^{-1}}(%s)", theo.string, oldlabl[i.y]) labl.ii <- c(oldlabl[i.x], labl.iy) result <- fv(DF, argu = atr$argu, ylab = ylab, valu = best, fmla = . ~ .x, alim = atr$alim, labl = labl.ii, desc = atr$desc[ii], unitname = NULL, fname = atr$fname, yexp = yexp) fvnames(result, ".") <- ynames unitname(result) <- unitname(f) return(result) } spatstat.explore/R/evidence.R0000644000176200001440000004754414635665500015730 0ustar liggesusers#' #' evidence.R #' #' evaluate covariate values at data points and at pixels #' together with intensity of null/reference model #' #' $Revision: 1.53 $ $Date: 2024/06/23 00:22:42 $ #' evalCovar <- function(model, covariate, ...) { .Deprecated("spatialCovariateEvidence", "spatstat.core") spatialCovariateEvidence(model, covariate, ...) } spatialCovariateEvidence <- function(model, covariate, ...) 
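#' Illustrative usage sketch (not run) for PPversion()/QQversion() in ppqq.R above;
#' assumes the 'cells' dataset and Kest() are available:
#'   K  <- Kest(cells)
#'   PK <- PPversion(K)   # P-P rescaling: K plotted against the theoretical value
#'   QK <- QQversion(K)   # Q-Q rescaling: values mapped through the inverse of K_pois
#'   plot(PK); plot(QK)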
{ UseMethod("spatialCovariateEvidence") } ## Code for spatialCovariateEvidence.ppm() is moved to spatstat.model spatialCovariateEvidence.ppp <- local({ spatialCovariateEvidence.ppp <- function(model, covariate, ..., lambdatype=c("cif", "trend", "intensity"), dimyx=NULL, eps=NULL, rule.eps=c("adjust.eps", "grow.frame", "shrink.frame"), interpolate=TRUE, jitter=TRUE, jitterfactor=1, modelname=NULL, covname=NULL, dataname=NULL, subset=NULL, clip.predict=TRUE) { lambdatype <- match.arg(lambdatype) dont.complain.about(lambdatype) if(is.null(modelname)) modelname <- "CSR" if(is.null(dataname)) dataname <- "data" if(is.null(covname)) { if(is.character(covariate)) covname <- covariate else covname <- singlestring(short.deparse(substitute(covariate))) } info <- list(modelname=modelname, covname=covname, dataname=dataname, csr=TRUE, ispois=TRUE, spacename="two dimensions") X <- model W <- Window(X) LambdaBar <- intensity(X) ## numeric value or vector #' explicit control of pixel resolution if(!is.null(dimyx) || !is.null(eps)) { rule.eps <- match.arg(rule.eps) W <- as.mask(W, dimyx=dimyx, eps=eps, rule.eps=rule.eps) } Wfull <- Zfull <- NULL if(!is.null(subset)) { #' restrict to subset if(!clip.predict) { #' use original window for prediction Wfull <- W } X <- X[subset] W <- W[subset, drop=FALSE] } #' evaluate covariate if(is.character(covariate)) { #' One of the characters 'x' or 'y' #' Turn it into a function. ns <- length(covariate) if(ns == 0) stop("covariate is empty") if(ns > 1) stop("more than one covariate specified") covname <- covariate covariate <- switch(covariate, x=xcoordfun, y=ycoordfun, stop(paste("Unrecognised covariate", dQuote(covariate)))) } if(!is.marked(X)) { #' ................... unmarked ....................... if(is.im(covariate)) { type <- "im" if(!interpolate) { #' look up covariate values ZX <- safelookup(covariate, X) } else { #' evaluate at data points by interpolation ZX <- interp.im(covariate, X$x, X$y) #' fix boundary glitches if(any(uhoh <- is.na(ZX))) ZX[uhoh] <- safelookup(covariate, X[uhoh]) } #' covariate values for pixels inside window Z <- covariate[W, drop=FALSE] if(!is.null(Wfull)) Zfull <- covariate[Wfull, drop=FALSE] #' corresponding mask W <- as.owin(Z) } else if(is.function(covariate)) { type <- "function" #' evaluate exactly at data points ZX <- covariate(X$x, X$y) if(!all(is.finite(ZX))) warning("covariate function returned NA or Inf values") #' window W <- as.mask(W) #' covariate in window Z <- as.im(covariate, W=W) if(!is.null(Wfull)) Zfull <- as.im(covariate, W=Wfull) #' collapse function body to single string covname <- singlestring(covname) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("The covariate should be", "an image, a function(x,y)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) #' values of covariate in window Zvalues <- as.vector(Z[W, drop=TRUE]) #' corresponding intensity values lambda <- rep.int(LambdaBar, length(Zvalues)) #' pixel area (constant) pixelarea <- with(Z, xstep * ystep) #' lambda values at data points lambdaX <- rep.int(LambdaBar, npoints(X)) #' lambda image lambdaimage <- as.im(LambdaBar, Wfull %orifnull% W) } else { #' ................... marked ....................... 
if(!is.multitype(X)) stop("Only implemented for multitype patterns (factor marks)") marx <- marks(X, dfok=FALSE) possmarks <- levels(marx) npts <- npoints(X) #' single image: replicate if(is.im(covariate)) { covariate <- rep(list(covariate), times=length(possmarks)) names(covariate) <- as.character(possmarks) } #' if(is.list(covariate) && all(sapply(covariate, is.im))) { #' list of images type <- "im" if(length(covariate) != length(possmarks)) stop("Number of images does not match number of possible marks") #' evaluate covariate at each data point ZX <- numeric(npts) for(k in seq_along(possmarks)) { ii <- (marx == possmarks[k]) covariate.k <- covariate[[k]] if(!interpolate) { #' look up covariate values values <- safelookup(covariate, X) } else { #' interpolate values <- interp.im(covariate.k, x=X$x[ii], y=X$y[ii]) #' fix boundary glitches if(any(uhoh <- is.na(values))) values[uhoh] <- safelookup(covariate.k, X[ii][uhoh]) } ZX[ii] <- values } #' restrict covariate images to window Z <- solapply(covariate, "[", i=W, drop=FALSE) if(!is.null(Wfull)) Z <- solapply(covariate, "[", i=Wfull, drop=FALSE) #' extract pixel locations and pixel values Zframes <- lapply(Z, as.data.frame) #' covariate values at each pixel inside window Zvalues <- unlist(lapply(Zframes, getElement, name="value")) #' pixel locations locn <- lapply(Zframes, getxy) #' tack on mark values for(k in seq_along(possmarks)) locn[[k]] <- cbind(locn[[k]], data.frame(marks=possmarks[k])) loc <- do.call(rbind, locn) #' corresponding fitted [conditional] intensity values lambda <- LambdaBar[as.integer(loc$marks)] #' pixel areas pixelarea <- rep(sapply(Z, pixarea), sapply(Z, npixdefined)) } else if(is.function(covariate)) { type <- "function" #' evaluate exactly at data points ZX <- functioncaller(x=X$x, y=X$y, m=marx, f=covariate, ...) #' functioncaller: function(x,y,m,f,...) { f(x,y,m,...) } #' same window W <- as.mask(W) #' covariate in window Z <- list() for(k in seq_along(possmarks)) Z[[k]] <- as.im(functioncaller, m=possmarks[k], f=covariate, W=W, ...) #' covariate in original window, for prediction if(!is.null(Wfull)) { Zfull <- list() for(k in seq_along(possmarks)) Zfull[[k]] <- as.im(functioncaller, m=possmarks[k], f=covariate, W=Wfull, ...) } Zpixvalues <- lapply(Z, pixelvalues) Zvalues <- unlist(Zpixvalues) #' corresponding fitted [conditional] intensity values lambda <- rep(LambdaBar, lengths(Zpixvalues)) if(length(lambda) != length(Zvalues)) stop("Internal error: length(lambda) != length(Zvalues)") #' collapse function body to single string covname <- singlestring(covname) #' pixel areas pixelarea <- rep(sapply(Z, pixarea), sapply(Z, npixdefined)) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("For a multitype point pattern,", "the covariate should be an image, a list of images,", "a function(x,y,m)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) #' lambda values at data points lambdaX <- LambdaBar[as.integer(marks(X))] #' lambda images lambdaimage <- solapply(LambdaBar, as.im, W=Wfull %orifnull% W) names(lambdaimage) <- possmarks } #' .......................................................... 
#' apply jittering to avoid ties if(jitter) { ZX <- jitter(ZX, factor=jitterfactor) Zvalues <- jitter(Zvalues, factor=jitterfactor) } lambdaname <- "the average intensity" check.finite(lambda, xname=lambdaname, usergiven=FALSE) check.finite(Zvalues, xname="the covariate", usergiven=TRUE) #' wrap up values <- list(Zimage = Zfull %orifnull% Z, lambdaimage = lambdaimage, Zvalues = Zvalues, lambda = lambda, lambdaX = lambdaX, weights = pixelarea, ZX = ZX, type = type) return(list(values=values, info=info, X=X)) # X is possibly a subset of original } xcoordfun <- function(x,y,m){x} ycoordfun <- function(x,y,m){y} pixarea <- function(z) { z$xstep * z$ystep } npixdefined <- function(z) { sum(!is.na(z$v)) } pixelvalues <- function(z) { as.data.frame(z)[,3L] } getxy <- function(z) { z[,c("x","y")] } ## Function caller used for marked locations (x,y,m) only. functioncaller <- function(x,y,m,f,...) { nf <- length(names(formals(f))) if(nf < 2) stop("Covariate function must have at least 2 arguments") if(nf == 2) return(f(x,y)) if(nf == 3) return(f(x,y,m)) argh <- list(...) extra <- intersect(names(argh), names(formals(f))[-(1:3)]) value <- do.call(f, append(list(x,y,m), argh[extra])) return(value) } spatialCovariateEvidence.ppp }) spatialCovariateEvidence.exactppm <- local({ spatialCovariateEvidence.exactppm <- function(model, covariate, ..., lambdatype=c("cif", "trend", "intensity"), dimyx=NULL, eps=NULL, rule.eps=c("adjust.eps", "grow.frame", "shrink.frame"), interpolate=TRUE, jitter=TRUE, jitterfactor=1, modelname=NULL, covname=NULL, dataname=NULL, subset=NULL, clip.predict=TRUE) { lambdatype <- match.arg(lambdatype) dont.complain.about(lambdatype) #' evaluate covariate values at data points and at pixels ispois <- TRUE csr <- is.null(model$baseline) #' determine names if(is.null(modelname)) modelname <- if(csr) "CSR" else "model with baseline" if(is.null(covname)) { if(is.character(covariate)) covname <- covariate else covname <- singlestring(short.deparse(substitute(covariate))) } if(is.null(dataname)) dataname <- "X" info <- list(modelname=modelname, covname=covname, dataname=dataname, csr=csr, ispois=ispois, spacename="two dimensions") X <- model$X W <- Window(X) #' explicit control of pixel resolution if(!is.null(dimyx) || !is.null(eps)) { rule.eps <- match.arg(rule.eps) W <- as.mask(W, dimyx=dimyx, eps=eps, rule.eps=rule.eps) } Wfull <- Zfull <- NULL if(!is.null(subset)) { #' restrict to subset if(!clip.predict) { ## use original window for prediction Wfull <- W } X <- X[subset] W <- W[subset, drop=FALSE] } #' evaluate covariate if(is.character(covariate)) { #' One of the characters 'x' or 'y' #' Turn it into a function. ns <- length(covariate) if(ns == 0) stop("covariate is empty") if(ns > 1) stop("more than one covariate specified") covname <- covariate covariate <- switch(covariate, x=xcoordfun, y=ycoordfun, stop(paste("Unrecognised covariate", dQuote(covariate)))) } if(!is.marked(X)) { #' ................... unmarked ....................... 
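#' Illustrative internal usage sketch (not run) for spatialCovariateEvidence.ppp above;
#' assumes the 'cells' dataset from spatstat.data is available:
#'   ev <- spatialCovariateEvidence(cells, covariate = "x")
#'   names(ev)             # "values", "info", "X"
#'   ev$info$modelname     # "CSR" for an unmarked pattern with no baseline
#'   range(ev$values$ZX)   # covariate values at the data points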
if(is.im(covariate)) { type <- "im" if(!interpolate) { #' look up covariate values ZX <- safelookup(covariate, X) } else { #' evaluate at data points by interpolation ZX <- interp.im(covariate, X$x, X$y) #' fix boundary glitches if(any(uhoh <- is.na(ZX))) ZX[uhoh] <- safelookup(covariate, X[uhoh]) } #' covariate values for pixels inside window (for calculation) Z <- covariate[W, drop=FALSE] #' covariate values for pixels inside window (for prediction) if(!is.null(Wfull)) Zfull <- covariate[Wfull, drop=FALSE] #' corresponding mask W <- as.owin(Z) } else if(is.function(covariate)) { type <- "function" #' evaluate exactly at data points ZX <- covariate(X$x, X$y) if(!all(is.finite(ZX))) warning("covariate function returned NA or Inf values") #' window W <- as.mask(W) #' covariate in window Z <- as.im(covariate, W=W) if(!is.null(Wfull)) Zfull <- as.im(covariate, W=Wfull) #' collapse function body to single string covname <- singlestring(covname) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("The covariate should be", "an image, a function(x,y)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) #' fitted [conditional] intensity image Lambda <- predict(model, locations=W, type=lambdatype) #' restrict to W Lambda <- Lambda[W, drop=FALSE] Z <- Z[W, drop=FALSE] #' extract corresponding pixel values df <- pairs.im(Lambda, Z, plot=FALSE, drop=TRUE) Zvalues <- df$Z lambda <- df$Lambda #' pixel area (constant) pixelarea <- with(Z, xstep * ystep) } else { #' ................... marked ....................... if(!is.multitype(X)) stop("Only implemented for multitype models (factor marks)") marx <- marks(X, dfok=FALSE) possmarks <- levels(marx) npts <- npoints(X) #' predicted intensity: may restrict domain LambdaImages <- predict(model, locations=W, type=lambdatype) W <- do.call(intersect.owin, unname(lapply(LambdaImages, as.owin))) lambda <- unlist(lapply(LambdaImages, pixelvalues)) #' handle covariate #' single image: replicate if(is.im(covariate)) { covariate <- rep(list(covariate), times=length(possmarks)) names(covariate) <- as.character(possmarks) } if(is.list(covariate) && all(sapply(covariate, is.im))) { #' list of images type <- "im" if(length(covariate) != length(possmarks)) stop("Number of images does not match number of possible marks") #' evaluate covariate at each data point ZX <- numeric(npts) for(k in seq_along(possmarks)) { ii <- (marx == possmarks[k]) covariate.k <- covariate[[k]] if(!interpolate) { #' look up covariate values values <- safelookup(covariate, X) } else { #' interpolate values <- interp.im(covariate.k, x=X$x[ii], y=X$y[ii]) #' fix boundary glitches if(any(uhoh <- is.na(values))) values[uhoh] <- safelookup(covariate.k, X[ii][uhoh]) } ZX[ii] <- values } #' restrict covariate images to window Z <- solapply(covariate, "[", i=W, drop=FALSE) if(!is.null(Wfull)) Zfull <- solapply(covariate, "[", i=Wfull, drop=FALSE) #' extract pixel locations and pixel values Zframes <- lapply(Z, as.data.frame) #' covariate values at each pixel inside window Zvalues <- unlist(lapply(Zframes, getElement, name="value")) #' pixel locations locn <- lapply(Zframes, getxy) #' tack on mark values for(k in seq_along(possmarks)) locn[[k]] <- cbind(locn[[k]], data.frame(marks=possmarks[k])) loc <- do.call(rbind, locn) #' pixel areas pixelarea <- rep(sapply(Z, pixarea), sapply(Z, npixdefined)) } else if(is.function(covariate)) { type <- "function" #' evaluate exactly at data points ZX <- functioncaller(x=X$x, y=X$y, m=marx, f=covariate, 
...) #' functioncaller: function(x,y,m,f,...) { f(x,y,m,...) } #' same window W <- as.mask(W) #' covariate in window Z <- list() for(k in seq_along(possmarks)) Z[[k]] <- as.im(functioncaller, m=possmarks[k], f=covariate, W=W, ...) Zvalues <- unlist(lapply(Z, pixelvalues)) #' covariate in original window, for prediction if(!is.null(Wfull)) { Zfull <- list() for(k in seq_along(possmarks)) Zfull[[k]] <- as.im(functioncaller, m=possmarks[k], f=covariate, W=Wfull, ...) } #' collapse function body to single string covname <- singlestring(covname) #' pixel areas pixelarea <- rep(sapply(Z, pixarea), sapply(Z, npixdefined)) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("For a multitype point process model,", "the covariate should be an image, a list of images,", "a function(x,y,m)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) } #' .......................................................... #' apply jittering to avoid ties if(jitter) { ZX <- jitter(ZX, factor=jitterfactor) Zvalues <- jitter(Zvalues, factor=jitterfactor) } check.finite(lambda, xname="the fitted intensity", usergiven=FALSE) check.finite(Zvalues, xname="the covariate", usergiven=TRUE) if(length(lambda) != length(Zvalues)) stop("Internal error: length(lambda) != length(Zvalues)") #' lambda values at data points lambdaX <- predict(model, locations=X) #' lambda image(s) lambdaimage <- predict(model, locations=Wfull %orifnull% W) #' wrap up values <- list(Zimage = Zfull %orifnull% Z, lambdaimage = lambdaimage, Zvalues = Zvalues, lambda = lambda, lambdaX = lambdaX, weights = pixelarea, ZX = ZX, type = type) return(list(values=values, info=info, X=X)) # X is possibly a subset of original } xcoordfun <- function(x,y,m){x} ycoordfun <- function(x,y,m){y} pixarea <- function(z) { z$xstep * z$ystep } npixdefined <- function(z) { sum(!is.na(z$v)) } pixelvalues <- function(z) { as.data.frame(z)[,3L] } getxy <- function(z) { z[,c("x","y")] } ## Function caller used for marked locations (x,y,m) only. functioncaller <- function(x,y,m,f,...) { nf <- length(names(formals(f))) if(nf < 2) stop("Covariate function must have at least 2 arguments") if(nf == 2) return(f(x,y)) if(nf == 3) return(f(x,y,m)) argh <- list(...) extra <- intersect(names(argh), names(formals(f))[-(1:3)]) value <- do.call(f, append(list(x,y,m), argh[extra])) return(value) } spatialCovariateEvidence.exactppm }) spatstat.explore/R/sdr.R0000644000176200001440000002214414611073310014705 0ustar liggesusers#' #' sdr.R #' #' Sufficient Dimension Reduction #' #' Matlab original: Yongtao Guan #' Translated to R by: Suman Rakshit #' Adapted for spatstat: Adrian Baddeley #' #' GNU Public Licence 2.0 || 3.0 #' #' $Revision: 1.15 $ $Date: 2020/01/30 05:10:49 $ #' sdr <- function(X, covariates, ...) { UseMethod("sdr") } sdr.ppp <- local({ sdr.ppp <- function(X, covariates, method=c("DR", "NNIR", "SAVE", "SIR", "TSE"), Dim1=1, Dim2=1, predict=FALSE, ...) { stopifnot(is.ppp(X)) method <- match.arg(method) trap.extra.arguments(...) 
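  ## Illustrative usage sketch (not run) for sdr() and sdrPredict(); the 'bei'
  ## pattern and the covariate image list 'bei.extra' from spatstat.data are
  ## assumptions for this sketch:
  ##   co <- sdr(bei, bei.extra, method = "DR", Dim1 = 1)
  ##   co$B                               # one basis coefficient per covariate
  ##   Y  <- sdrPredict(bei.extra, co$B)  # reduced covariate image(s)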
#' ensure 'covariates' is a list of compatible images if(!inherits(covariates, "imlist") && !all(sapply(covariates, is.im))) stop("Argument 'covariates' must be a list of images") nc <- length(covariates) if(nc == 0) stop("Need at least one covariate!") if(nc < Dim1 + (method == "TSE") * Dim2) stop(paste(if(method == "TSE") "Dim1 + Dim2" else "Dim1", "must not exceed the number of covariates"), call.=FALSE) if(nc > 1 && !do.call(compatible, unname(covariates))) covariates <- do.call(harmonise, covariates) #' extract corresponding pixel values including NA's Ypixval <- sapply(lapply(covariates, as.matrix), as.vector) #' compute sample mean and covariance matrix m <- colMeans(Ypixval, na.rm=TRUE) V <- cov(Ypixval, use="complete") #' evaluate each image at point data locations YX <- sapply(covariates, safelook, Y=X) #' apply precomputed standardisation Zx <- t(t(YX) - m) %*% matrixinvsqrt(V) #' ready coordsX <- coords(X) result <- switch(method, DR = calc.DR(COV=V, z=Zx, Dim=Dim1), NNIR = calc.NNIR(COV=V, z=Zx, pos=coordsX, Dim=Dim1), SAVE = calc.SAVE(COV=V, z=Zx, Dim=Dim1), SIR = calc.SIR(COV=V, z=Zx ), TSE = calc.TSE(COV=V, z=Zx, pos=coordsX, Dim1=Dim1, Dim2=Dim2) ) #' covnames <- names(covariates) %orifnull% paste0("Y", 1:nc) dimnames(result$B) <- list(covnames, paste0("B", 1:ncol(result$B))) if(method == "TSE") { result$M1 <- namez(result$M1) result$M2 <- namez(result$M2) } else { result$M <- namez(result$M) } if(predict) result$Y <- sdrPredict(covariates, result$B) return(result) } safelook <- function(Z, Y, ...) { safelookup(Z, Y, ...) } namez <- function(M, prefix="Z") { dimnames(M) <- list(paste0(prefix, 1:nrow(M)), paste0(prefix, 1:ncol(M))) return(M) } sdr.ppp }) sdrPredict <- function(covariates, B) { if(!is.matrix(B)) { if(is.list(B) && is.matrix(BB <- B$B)) B <- BB else stop("B should be a matrix, or the result of a call to sdr()", call.=FALSE) } if(!inherits(covariates, "imlist") && !all(sapply(covariates, is.im))) stop("Argument 'covariates' must be a list of images") stopifnot(nrow(B) == length(covariates)) result <- vector(mode="list", length=ncol(B)) for(j in seq_along(result)) { cj <- as.list(B[,j]) Zj <- mapply("*", cj, covariates, SIMPLIFY=FALSE) result[[j]] <- im.apply(Zj, sum) } names(result) <- colnames(B) return(as.solist(result)) } ##............ DR (Directional Regression) .......................... calc.DR <- function(COV, z, Dim){ ## Description: Naive Directional Regression Method ## Input: ## COV - cov{X(s)} ## z - standardized X(s) on SPP locations ## Dim - the CS dimension ## Output: ## B - the estimated CS basis ## M - the kernel matrix ss <- nrow(z) ncov <- ncol(z) ## M1 <- (t(z) %*% z)/ss - diag(1,ncov) M1 <- crossprod(z)/ss - diag(1,ncov) M1 <- M1 %*% M1 # the SAVE kernel covMean <- matrix(colMeans(z),ncol=1) M2 <- covMean %*% t(covMean) M3 <- M2 * (base::norm(covMean, type="2"))^2 # the SIR kernel M2 <- M2 %*% M2 # the SIR-2 kernel M <- (M1 + M2 + M3)/3 # the DR kernel SVD <- svd(M) B <- SVD$u[,1:Dim] B <- matrixinvsqrt(COV) %*% B # back to original scale return(list(B=B, M=M)) } ## ............ NNIR (Nearest Neighbor Inverse Regression) ........... 
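## Minimal internal sketch (not run) of calc.DR() above, using a hypothetical
## standardised covariate matrix z with (approximately) identity covariance:
##   z  <- scale(matrix(rnorm(200), ncol = 2))
##   dr <- calc.DR(COV = diag(2), z = z, Dim = 1)
##   dr$B    # 2 x 1 basis estimate of the central subspace
##   dr$M    # 2 x 2 kernel matrix, the average of the DR component kernels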
calc.NNIR <- function(COV, z, pos, Dim) { ## Description: Nearest Neighbor Inverse Regression ## Input: ## COV - cov{X(s)} ## z - standardized X(s) on SPP locations ## pos - the position of SPP events ## Dim - the CS dimension ## Output: ## B - the estimated CS basis ## M - the kernel matrix ss <- nrow(z) # sample size # ncov <- ncol(z) # predictor dimension jj <- nnwhich(pos) # identify nearest neighbour of each point dir <- z - z[jj, , drop=FALSE] # empirical direction IM <- sumouter(dir) # inverse of kernel matrix: sum of outer(dir[i,], dir[i,]) M <- solve(IM/ss) # invert kernel matrix SVD <- svd(M) B <- matrixinvsqrt(COV) %*% SVD$u[, 1:Dim, drop=FALSE] return(list(B=B, M=M)) } ## ........... SAVE (Sliced Average Variance Estimation) ........... calc.SAVE <- function(COV, z, Dim){ ## Description: Naive Directional Regression Method ## Input ## COV - cov{X(s)} ## z - standardized X(s) on SPP locations ## Dim - the central space dimension ## Value ## B - the estimated CS basis ## M - the kernel matrix # ss <- nrow(z) ncov <- ncol(z) M <- diag(1,ncov) - cov(z) M <- M %*% M SVD <- svd(M) B <- SVD$u[,1:Dim] B <- matrixinvsqrt(COV) %*% B return(list(B=B, M=M)) } ##.......... SIR (Sliced Inverse Regression) ...................... calc.SIR <- function(COV, z){ ## Description: Naive Directional Regression Method ## Input: ## COV - cov{X(s)} ## z - standardized X(s) on SPP locations ## Output: ## B - the estimated CS basis ## M - the kernel matrix covMean <- colMeans(z) B <- matrixinvsqrt(COV) %*% covMean # do SIR estimation B <- B/sqrt(sum(B^2)) # normalise to unit length M <- covMean %*% t(covMean) # create kernel matrix return(list(B=B, M=M)) } ## ............. TSE (Two-Step Estimation) .................... calc.TSE <- function(COV, z, pos, Dim1, Dim2) { ## Description: A Two-Step Method ## Input: ## COV - cov{X(s)} ## z - standardized X(s) on SPP locations ## Dim1 - the S1 dimension ## Dim2 - the S2 dimension ## Output: ## B - the estimated CS basis. Its first Dim1 columns ## are estimating S1 and the remaining Dim2 columns are ## estimating S2. In case of null space, a zero vector is reported. ## M1 - the kernel matrix of DR ## M2 - the kernel matrix of NNIR, which might be subject ## to some change, depending on the results of M1. # ss <- nrow(z) # sample size ncov <- ncol(z) # predictor dimension est1 <- calc.DR(COV, z, ncov) # do DR estimation est2 <- calc.NNIR(COV, z, pos, ncov) # do NNIR estimation M1 <- est1$M M2 <- est2$M if(Dim1 > 0) { U <- svd(M1)$u B1 <- U[ , 1:Dim1, drop=FALSE] # get S1 estimate ## Q <- diag(1, ncov) - B1 %*% solve(t(B1) %*% B1) %*% t(B1) Q <- diag(1, ncov) - B1 %*% solve(crossprod(B1)) %*% t(B1) # contract orthogonal basis M2 <- Q %*% M2 %*% Q # do constrained NNIR } else { B1 <- matrix(0, ncov, 1) } if(Dim2 > 0) { U <- svd(M2)$u # do SVD for possibly updated M2 B2 <- U[ , 1:Dim2, drop=FALSE] # get basis estimator } else { B2 <- matrix(0, ncov, 1) } B <- matrixinvsqrt(COV) %*% cbind(B1,B2) return(list(B=B, M1=M1, M2=M2)) } ## ////////////////// ADDITIONAL FUNCTIONS ///////////////////// subspaceDistance <- function(B0,B1) { ## ======================================================== # ## Evaluate the distance between the two linear spaces S(B0) and S(B1). ## The measure used is the one proposed by Li et al. (2004). 
## ======================================================== # stopifnot(is.matrix(B0)) stopifnot(is.matrix(B1)) ## Proj0 <- B0 %*% solve((t(B0) %*% B0)) %*% t(B0) # Proj matrix on S(B0) Proj0 <- B0 %*% solve(crossprod(B0)) %*% t(B0) # Proj matrix on S(B0) lam <- svd(B1) # check whether B1 is singular U <- lam$u D <- lam$d # V <- lam$v B2 <- U[, D > 1e-09] # keep non-singular directions Proj1 <- B2 %*% solve((t(B2) %*% B2)) %*% t(B2) # Proj matrix on S(B.hat) Svd <- svd(Proj0 - Proj1) # Do svd for P0-P1 dist <- max(abs(Svd$d)) # Get the maximum absolute svd value return(dist) } dimhat <- function(M){ #' Description: Maximum Descent Estimator for CS Dim #' Input: #' M - the estimated kernel matrix #' Output: #' dimhat - the estimated CS dim (assume dim>0) stopifnot(is.matrix(M)) ncov <- ncol(M) # predictor dimension maxdim <- max((ncov-1), 5) # maximum structure dimension SVD <- svd(M) # svd of kernel matrix lam <- SVD$d eps <- 1e-06 lam <- lam + rep(eps,ncov) # add ridge effect lam1 <- lam[-ncov] lam2 <- lam[-1] dif <- lam1/lam2 dif <- dif[1 : maxdim] # the magnitude of drop retval <- which.max(dif) # find Maximum Descent estimator return(retval) } spatstat.explore/R/rotmean.R0000644000176200001440000000333514611073310015563 0ustar liggesusers## ## rotmean.R ## ## rotational average of pixel values ## ## $Revision: 1.13 $ $Date: 2020/05/22 02:43:34 $ rotmean <- function(X, ..., origin, padzero=TRUE, Xname, result=c("fv", "im"), adjust=1) { if(missing(Xname)) Xname <- sensiblevarname(short.deparse(substitute(X)), "X") trap.extra.arguments(..., .Context="rotmean") stopifnot(is.im(X)) if(!missing(origin)) { X <- shift(X, origin=origin) backshift <- -getlastshift(X) } else { backshift <- NULL } result <- match.arg(result) rmax <- with(vertices(Frame(X)), sqrt(max(x^2+y^2))) Xunpad <- X if(padzero) X <- padimage(na.handle.im(X, 0), 0, W=square(c(-1,1)*rmax)) Xdata <- as.data.frame(X) values <- Xdata$value radii <- with(Xdata, sqrt(x^2+y^2)) ra <- pmin(range(radii), rmax) bw <- adjust * 0.1 * sqrt(X$xstep^2 + X$ystep^2) a <- unnormdensity(radii, from=ra[1], to=ra[2], bw=bw) b <- unnormdensity(radii, weights=values, from=ra[1], to=ra[2], bw=a$bw) df <- data.frame(r=a$x, f=b$y/a$y) FUN <- fv(df, argu="r", ylab=substitute(bar(X)(r), list(X=as.name(Xname))), valu="f", fmla=(. ~ r), alim=ra, labl=c("r", "%s(r)"), desc=c("distance argument r", "rotational average"), unitname=unitname(X), fname=paste0("bar", paren(Xname))) attr(FUN, "dotnames") <- "f" unitname(FUN) <- unitname(X) if(result == "fv") return(FUN) ## compute image FUN <- as.function(FUN) XX <- as.im(Xunpad, na.replace=1) IM <- as.im(function(x,y,FUN){ FUN(sqrt(x^2+y^2)) }, XX, FUN=FUN) if(!is.null(backshift)) IM <- shift(IM,backshift) unitname(IM) <- unitname(X) return(IM) } spatstat.explore/R/spatialQuantile.R0000644000176200001440000001325114633203032017254 0ustar liggesusers#' #' Spatially weighted quantile #' SpatialMedian <- function(X, ...) { UseMethod("SpatialMedian") } SpatialQuantile <- function(X, prob=0.5, ...) 
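#' Illustrative usage sketch (not run); assumes the 'longleaf' marked pattern
#' from spatstat.data is available, with a guessed bandwidth of 30 metres:
#'   M  <- SpatialMedian(longleaf, sigma = 30)                  # pixel image
#'   Q1 <- SpatialQuantile(longleaf, prob = 0.25, sigma = 30)
#'   V  <- SpatialQuantile(longleaf, prob = 0.25, sigma = 30, at = "points")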
{ UseMethod("SpatialQuantile") } #' methods for 'ppp' class SpatialMedian.ppp <- function(X, sigma=NULL, ..., type=4, at=c("pixels", "points"), leaveoneout=TRUE, weights=NULL, edge=TRUE, diggle=FALSE, verbose=FALSE) { SpatialQuantile.ppp(X, sigma=sigma, prob=0.5, ..., type=type, at=at, leaveoneout=leaveoneout, weights=weights, edge=edge, diggle=diggle, verbose=verbose) } SpatialQuantile.ppp <- function(X, prob=0.5, sigma=NULL, ..., type=1, at=c("pixels", "points"), leaveoneout=TRUE, weights=NULL, edge=TRUE, diggle=FALSE, verbose=FALSE) { if(!is.ppp(X)) stop("X should be a point pattern") if(!is.marked(X)) stop("The point pattern X should have marks") check.1.real(prob) stopifnot(prob >= 0) stopifnot(prob <= 1) at <- match.arg(at) atName <- switch(at, pixels="pixels", points="data points") check.1.integer(type) type <- as.integer(type) if(!any(type == c(1L,4L))) stop(paste("Quantiles of type", type, "are not supported"), call.=FALSE) ## extract marks X <- coerce.marks.numeric(X) m <- marks(X) ## multiple columns of marks? if(!is.null(dim(m)) && ncol(m) > 1) { ## compute separately for each column Xlist <- unstack(X) Zlist <- lapply(Xlist, SpatialQuantile, prob=prob, sigma=sigma, ..., type=type, at=at, leaveoneout=leaveoneout, weights=weights, edge=edge, diggle=diggle, verbose=verbose) ZZ <- switch(at, pixels = as.imlist(Zlist), points = do.call(data.frame, Zlist)) return(ZZ) } ## single column of marks m <- as.numeric(m) nX <- npoints(X) #' unique mark values um <- sort(unique(m)) Num <- length(um) #' trivial cases if(nX == 0 || ((Num == 1) && leaveoneout)) { Z <- switch(at, pixels = as.im(NA_real_, W=Window(X), ...), points = rep(NA_real_, nX)) attr(Z, "sigma") <- sigma return(Z) } if(Num == 1) { Z <- switch(at, pixels = as.im(um[1], W=Window(X), ...), points = rep(um[1], nX)) attr(Z, "sigma") <- sigma return(Z) } #' numerical weights if(!is.null(weights)) { check.nvector(weights, nX, vname="weights") if(any(weights < 0)) stop("Negative weights are not permitted") if(sum(weights) < .Machine$double.eps) stop("Weights are numerically zero; quantiles are undefined", call.=FALSE) } #' start main calculation ## bandwidth selector if(is.function(sigma)) sigma <- sigma(X, ...) 
#' edge correction has no effect if diggle=FALSE #' (because uniform edge correction cancels) edge <- edge && diggle #' smoothed intensity of entire pattern UX <- unmark(X) LX <- density(UX, ..., sigma=sigma, at=at, leaveoneout=leaveoneout, weights=weights, edge=edge, diggle=diggle, positive=TRUE) #' extract smoothing bandwidth actually used sigma <- attr(LX, "sigma") varcov <- attr(LX, "varcov") #' initialise result Z <- LX Z[] <- NA #' guard against underflow tinythresh <- 8 * .Machine$double.eps if(underflow <- (min(LX) < tinythresh)) { Bad <- (LX < tinythresh) warning(paste("Numerical underflow detected at", percentage(Bad, 1), "of", paste0(atName, ";"), "sigma is probably too small"), call.=FALSE) #' apply l'Hopital's Rule at the problem locations Z[Bad] <- nnmark(X, at=at, xy=LX)[Bad] Good <- !Bad } #' compute for(k in 1:Num) { #' cumulative spatial weight of points with marks <= m_[k] if(k == Num) { Acum.k <- 1 } else { w.k <- (m <= um[k]) * (weights %orifnull% 1) Lcum.k <- density(UX, weights=w.k, sigma=sigma, varcov=varcov, xy=LX, at=at, leaveoneout=leaveoneout, edge=edge, diggle=diggle, positive=TRUE) Acum.k <- Lcum.k/LX } if(k == 1) { #' region where quantile is um[1] relevant <- (Acum.k >= prob) if(underflow) relevant <- relevant & Good if(any(relevant)) { Z[relevant] <- um[1] if(verbose) splat("value um[1] =", um[1], "assigned to", sum(relevant), atName) } } else { #' region where quantile is between um[k-1] and um[k] unassigned <- (Acum.kprev < prob) if(underflow) unassigned <- unassigned & Good if(!any(unassigned)) break relevant <- unassigned & (Acum.k >= prob) if(any(relevant)) { if(type == 1) { ## left-continuous inverse left <- (Acum.k > prob) Z[relevant & left] <- um[k-1] Z[relevant & !left] <- um[k] } else if(type == 4) { ## linear interpolation Z[relevant] <- um[k-1] + (um[k] - um[k-1]) * ((prob - Acum.kprev)/(Acum.k - Acum.kprev))[relevant] } if(verbose) splat("values between", paste0("um", paren(k-1, "[")), "=", um[k-1], "and", paste0("um", paren(k, "[")), "=", um[k], "assigned to", sum(relevant), atName) } } Acum.kprev <- Acum.k } attr(Z, "sigma") <- sigma attr(Z, "varcov") <- varcov return(Z) } spatstat.explore/R/smooth.ppp.R0000644000176200001440000013633314633203032016232 0ustar liggesusers# # smooth.ppp.R # # Smooth the marks of a point pattern # # $Revision: 1.90 $ $Date: 2024/06/09 00:02:42 $ # Smooth <- function(X, ...) { UseMethod("Smooth") } Smooth.solist <- function(X, ...) { solapply(X, Smooth, ...) 
} Smooth.ppp <- function(X, sigma=NULL, ..., weights=rep(1, npoints(X)), at="pixels", leaveoneout=TRUE, adjust=1, varcov=NULL, edge=TRUE, diggle=FALSE, kernel="gaussian", scalekernel=is.character(kernel), se=FALSE, loctype=c("random", "fixed"), wtype=c("multiplicity", "importance"), geometric=FALSE) { verifyclass(X, "ppp") if(!is.marked(X, dfok=TRUE, na.action="fatal")) stop("X should be a marked point pattern", call.=FALSE) nX <- npoints(X) X <- coerce.marks.numeric(X) marx <- marks(X) if(!all(is.finite(as.matrix(marx)))) stop("Some mark values are Inf, NaN or NA", call.=FALSE) univariate <- is.null(dim(marx)) ## options at <- pickoption("output location type", at, c(pixels="pixels", points="points")) loctype <- match.arg(loctype) wtype <- match.arg(wtype) ## trivial case if(nX == 0) { cn <- colnames(marks(X)) nc <- length(cn) switch(at, points = { Estimate <- if(univariate) numeric(0) else matrix(, 0, nc, dimnames=list(NULL, cn)) result <- if(!se) Estimate else cbind(estimate=Estimate, SE=Estimate) }, pixels = { Estimate <- as.im(NA_real_, Window(X)) if(!univariate) { Estimate <- as.solist(rep(list(Estimate), nc)) names(Estimate) <- cn } result <- if(!se) { Estimate } else if(univariate) { solist(estimate=Estimate, SE=Estimate) } else { list(estimate=Estimate, SE=Estimate) } }) return(result) } ## ensure weights are numeric if(weighted <- !missing(weights) && !is.null(weights)) { pa <- parent.frame() weights <- pointweights(X, weights=weights, parent=pa) weighted <- !is.null(weights) } else weights <- NULL ## geometric mean smoothing if(geometric) return(ExpSmoothLog(X, sigma=sigma, ..., at=at, adjust=adjust, varcov=varcov, kernel=kernel, scalekernel=scalekernel, se=se, loctype=loctype, wtype=wtype, weights=weights, edge=edge, diggle=diggle)) ## determine smoothing parameters if(scalekernel) { ker <- resolve.2D.kernel(sigma=sigma, ..., adjust=adjust, varcov=varcov, kernel=kernel, x=X, bwfun=bw.smoothppp, allow.zero=TRUE) sigma <- ker$sigma varcov <- ker$varcov adjust <- 1 } ## ............ infinite bandwidth ............................... if(bandwidth.is.infinite(sigma)) { #' uniform estimate if(!weighted) weights <- rep(1, nX) wtmark <- weights * marx totwt <- sum(weights) totwtmark <- if(univariate) sum(wtmark) else colSums(wtmark) W <- Window(X) switch(at, pixels = { Estimate <- solapply(totwtmark/totwt, as.im, W=W, ...) 
names(Estimate) <- colnames(marx) if(univariate) Estimate <- Estimate[[1L]] }, points = { denominator <- rep(totwt, nX) numerator <- rep(totwtmark, each=nX) if(!univariate) numerator <- matrix(numerator, nrow=nX) if(leaveoneout) { numerator <- numerator - wtmark denominator <- denominator - weights } Estimate <- numerator/denominator if(!univariate) colnames(Estimate) <- colnames(marx) }) if(!se) { result <- Estimate } else { ## calculate standard error (constant value) if(univariate) { V <- if(!weighted) var(marx) else switch(loctype, fixed = { switch(wtype, multiplicity = weighted.var(marx, weights), importance = var(marx * weights)) }, random = { switch(wtype, multiplicity = VarOfWtdMean(marx, weights), importance = VarOfWtdMean(marx, weights^2)) }) SE <- sqrt(V) # single value } else { V <- if(!weighted) sapply(marx, var) else switch(loctype, fixed = { switch(wtype, multiplicity = sapply(marx, weighted.var, wt=weights), importance = sapply(marx * weights, var)) }, random = { switch(wtype, multiplicity = sapply(marx, VarOfWtdMean, weights=weights), importance = sapply(marx, VarOfWtdMean, weights=weights^2)) }) SE <- sqrt(V) # vector } ## replicate constant value switch(at, pixels = { if(univariate) { SE <- as.im(SE, W=W, ...) # constant image result <- solist(estimate=Estimate, SE=SE) } else { SE <- solapply(SE, as.im, ...) # list of images result <- list(estimate=Estimate, SE=SE) } }, points = { if(univariate) { SE <- rep(SE, nX) result <- cbind(estimate=Estimate, SE=SE) } else { SE <- matrix(SE[col(marx)], nrow=nX, ncol=ncol(marx)) colnames(SE) <- colnames(marx) result <- cbind(estimate=Estimate, SE=SE) } }) } return(result) } ## .................. finite bandwidth ............................. ## Diggle's edge correction? if(diggle && !edge) warning("Option diggle=TRUE overridden by edge=FALSE") diggle <- diggle && edge ## ## cutoff distance (beyond which the kernel value is treated as zero) cutoff <- cutoff2Dkernel(kernel, sigma=sigma, varcov=varcov, scalekernel=scalekernel, adjust=adjust, ..., fatal=TRUE) ## ................... bandwidth close to zero ..................... if(cutoff < minnndist(X)) { # very small bandwidth if(!leaveoneout && at=="points") { warning(paste("Bandwidth is close to zero:", "original values returned")) Estimate <- marks(X) } else { warning(paste("Bandwidth is close to zero:", "nearest-neighbour interpolation performed")) Estimate <- nnmark(X, ..., k=1, at=at) } if(!se) { result <- Estimate } else { SE <- 0 * Estimate switch(at, pixels = { if(univariate) { result <- solist(estimate=Estimate, SE=SE) } else { result <- list(estimate=Estimate, SE=SE) } }, points = { result <- cbind(estimate=Estimate, SE=SE) }) } return(result) } ## ................... bandwidth >> 0 ......................... if(se) { ## ................... STANDARD ERROR CALCULATION .............. ## This has to be done now because the subsequent code ## fiddles with the weights. 
weightspower <- if(!weighted) 1 else switch(wtype, importance = weights^2, multiplicity = weights) if(diggle) { ## Jones-Diggle correction weights e(x_i) edgeim <- second.moment.calc(X, sigma, what="edge", ..., varcov=varcov) edgeX <- safelookup(edgeim, X, warn=FALSE) invmassX <- 1/edgeX invmassX[!is.finite(invmassX)] <- 0 } else { invmassX <- 1 } ## switch(loctype, random = { denom <- density(X, sigma=sigma, ..., weights=weights, edge=edge, diggle=diggle, at=at, leaveoneout=leaveoneout) numer <- density(X, sigma=sigma, ..., weights=if(weighted) weights * marx else marx, edge=edge, diggle=diggle, at=at, leaveoneout=leaveoneout) varNum <- density(X, sigma=sigma, ..., kerpow=2, weights=weightspower * marx^2 * invmassX^2, edge=FALSE, diggle=FALSE, at=at, leaveoneout=leaveoneout) covND <- density(X, sigma=sigma, ..., kerpow=2, weights=weightspower * marx * invmassX^2, edge=FALSE, diggle=FALSE, at=at, leaveoneout=leaveoneout) varDen <- density(X, sigma=sigma, ..., kerpow=2, weights=weightspower * invmassX^2, edge=FALSE, diggle=FALSE, at=at, leaveoneout=leaveoneout) if(univariate || at == "points") { Vest <- DeltaMethodVarOfRatio(numer, denom, varNum, varDen, covND) } else { Vest <- mapply(DeltaMethodVarOfRatio, num=numer, varnum=varNum, covnumden=covND, MoreArgs = list(den=denom, varden=varDen), SIMPLIFY=FALSE) Vest <- as.solist(Vest) } }, fixed = { ## Use leave-one-out deviation dev <- marks(X) - Smooth(X, sigma=sigma, ..., weights=weights, edge=edge, diggle=diggle, at="points", leaveoneout=TRUE) if(!univariate) dev <- asNumericMatrix(as.data.frame(dev)) ## calculate variance of numerator using leave-one-out estimates dataweight <- dev^2 if(weighted) dataweight <- dataweight * switch(wtype, importance=weights^2, multiplicity=weights) if(edge && diggle) dataweight <- dataweight * invmassX^2 ## variance of numerator Vnum <- density(unmark(X), sigma=sigma, kerpow=2, weights=dataweight, at=at, leaveoneout=leaveoneout, edge=FALSE, diggle=FALSE, # sic positive=TRUE) if(at == "points" && !univariate) Vnum <- asNumericMatrix(as.data.frame(Vnum)) ## rescale by denominator^2 Den <- density(unmark(X), sigma=sigma, weights=weights, edge=edge && diggle, diggle=diggle, # sic at=at, leaveoneout=leaveoneout, positive=TRUE) Vest <- if(at == "points") Vnum/Den^2 else imagelistOp(Vnum, Den^2, "/") }) SE <- sqrt(Vest) } ## ------------------------------------------------------------ ## >>>>>>>>>>>. MAIN CALCULATION OF ESTIMATE <<<<<<<<<<<<<<<<<< ## ------------------------------------------------------------ if(diggle) { ## absorb Diggle edge correction into weights vector edg <- second.moment.calc(X, sigma, what="edge", ..., varcov=varcov, adjust=adjust, kernel=kernel, scalekernel=scalekernel) ei <- safelookup(edg, X, warn=FALSE) weights <- if(weighted) weights/ei else 1/ei weights[!is.finite(weights)] <- 0 weighted <- TRUE } ## rescale weights to avoid numerical gremlins if(weighted && ((mw <- median(abs(weights))) > 0)) weights <- weights/mw ## calculate... uhoh <- NULL if(!is.data.frame(marx)) { # ........ vector of marks ................... 
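    ## (The code below computes the Nadaraya-Watson kernel estimate
    ##    m(u) = sum_i w_i k(u - x_i) v_i / sum_i w_i k(u - x_i)
    ##  as a ratio of two smoothed intensity surfaces: the numerator uses
    ##  weights values*weights, the denominator uses the weights alone.)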
values <- marx if(is.factor(values)) warning("Factor valued marks were converted to integers", call.=FALSE) values <- as.numeric(values) ## detect constant values ra <- range(values, na.rm=TRUE) if(diff(ra) == 0) { switch(at, points = { result <- values }, pixels = { M <- do.call.matched(as.mask, list(w=as.owin(X), ...)) result <- as.im(ra[1], M) }) } else { switch(at, points={ result <- do.call(smoothpointsEngine, resolve.defaults(list(x=quote(X), values=quote(values), weights=quote(weights), leaveoneout=leaveoneout, sigma=sigma, varcov=varcov, kernel=kernel, scalekernel=scalekernel, edge=FALSE), list(...))) }, pixels={ values.weights <- if(weighted) values * weights else values dont.complain.about(values.weights) numerator <- do.call(density.ppp, resolve.defaults(list(x=quote(X), at="pixels", weights = quote(values.weights), sigma=sigma, varcov=varcov, kernel=kernel, scalekernel=scalekernel, edge=FALSE), list(...))) denominator <- do.call(density.ppp, resolve.defaults(list(x=quote(X), at="pixels", weights = quote(weights), sigma=sigma, varcov=varcov, kernel=kernel, scalekernel=scalekernel, edge=FALSE), list(...))) result <- eval.im(numerator/denominator) ## trap small values of denominator ## trap NaN and +/- Inf values of result, but not NA eps <- .Machine$double.eps nbg <- eval.im(is.infinite(result) | is.nan(result) | (denominator < eps)) uhoh <- attr(numerator, "warnings") if(any(as.matrix(nbg), na.rm=TRUE)) { warning(paste("Numerical underflow detected:", "sigma is probably too small"), call.=FALSE) uhoh <- unique(c(uhoh, "underflow")) ## l'Hopital's rule distX <- distmap(X, xy=numerator) whichnn <- attr(distX, "index") nnvalues <- eval.im(values[whichnn]) result[nbg] <- nnvalues[nbg] } }) } } else { ## ......... data frame of marks .................. 
## convert to numerical values if(any(sapply(as.list(marx), is.factor))) warning("Factor columns of marks were converted to integers", call.=FALSE) marx <- asNumericMatrix(marx) ## detect constant columns ra <- apply(marx, 2, range, na.rm=TRUE) isconst <- (apply(ra, 2, diff) == 0) if(anyisconst <- any(isconst)) { oldmarx <- marx # oldX <- X marx <- marx[, !isconst] X <- X %mark% marx } if(any(!isconst)) { ## compute denominator denominator <- do.call(density.ppp, resolve.defaults(list(x=quote(X), at=at, leaveoneout=leaveoneout, weights = quote(weights), sigma=sigma, varcov=varcov, kernel=kernel, scalekernel=scalekernel, edge=FALSE), list(...))) ## compute numerator for each column of marks marx.weights <- if(weighted) marx * weights else marx dont.complain.about(marx.weights) numerators <- do.call(density.ppp, resolve.defaults(list(x=quote(X), at=at, leaveoneout=leaveoneout, weights = quote(marx.weights), sigma=sigma, varcov=varcov, kernel=kernel, scalekernel=scalekernel, edge=FALSE), list(...))) uhoh <- attr(numerators, "warnings") ## calculate ratios switch(at, points={ if(is.null(uhoh)) { ## numerators is a matrix (or may have dropped to vector) if(is.data.frame(numerators)) { numerators <- as.matrix(numerators) } else if(!is.matrix(numerators)) { numerators <- matrix(unlist(numerators), nrow=npoints(X)) } ratio <- numerators/denominator if(any(badpoints <- matrowany(!is.finite(ratio)))) { whichnnX <- nnwhich(X) ratio[badpoints,] <- as.matrix(marx[whichnnX[badpoints], , drop=FALSE]) } } else { warning("returning original values") ratio <- marx } result <- as.data.frame(ratio) colnames(result) <- colnames(marx) }, pixels={ ## numerators is a list of images (or may have dropped to 'im') if(is.im(numerators)) numerators <- list(numerators) result <- solapply(numerators, "/", e2=denominator) eps <- .Machine$double.eps denOK <- eval.im(denominator >= eps) if(!is.null(uhoh) || !all(denOK)) { ## compute nearest neighbour map on same raster distX <- distmap(X, xy=denominator) whichnnX <- attr(distX, "index") ## fix images allgood <- TRUE for(j in 1:length(result)) { ratj <- result[[j]] valj <- marx[,j] goodj <- eval.im(is.finite(ratj) & denOK) result[[j]] <- eval.im(goodj, ratj, valj[whichnnX]) allgood <- allgood && all(goodj) } if(!allgood) { warning(paste("Numerical underflow detected:", "sigma is probably too small"), call.=FALSE) uhoh <- unique(c(uhoh, "underflow")) } } names(result) <- colnames(marx) }) } else result <- NULL if(anyisconst) { partresult <- result switch(at, points = { nX <- npoints(X) result <- matrix(, nX, ncol(oldmarx)) if(length(partresult) > 0) result[,!isconst] <- as.matrix(partresult) result[,isconst] <- rep(ra[1,isconst], each=nX) colnames(result) <- colnames(oldmarx) }, pixels = { result <- vector(mode="list", length=ncol(oldmarx)) if(length(partresult) > 0) { result[!isconst] <- partresult M <- as.owin(partresult[[1]]) } else { M <- do.call.matched(as.mask, list(w=as.owin(X), ...)) } result[isconst] <- lapply(ra[1, isconst], as.im, W=M) result <- as.solist(result) names(result) <- colnames(oldmarx) }) } } ## wrap up attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov if(length(uhoh)) attr(result, "warnings") <- uhoh ## tack on standard errors? 
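  ## Illustrative usage sketch (not run) for Smooth.ppp; assumes the 'longleaf'
  ## dataset from spatstat.data is available:
  ##   S  <- Smooth(longleaf, sigma = 20)                    # smoothed mark surface
  ##   SS <- Smooth(longleaf, sigma = 20, se = TRUE)         # estimate and SE images
  ##   Sp <- Smooth(longleaf, sigma = bw.smoothppp(longleaf),
  ##                at = "points", leaveoneout = TRUE)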
if(se) { result <- list(estimate=result, SE=SE) switch(at, points = { result <- do.call(cbind, result) }, pixels = { if(univariate) result <- as.solist(result) }) } return(result) } smoothpointsEngine <- function(x, values, sigma, ..., kernel="gaussian", scalekernel=is.character(kernel), weights=NULL, varcov=NULL, leaveoneout=TRUE, sorted=FALSE, cutoff=NULL, debug=FALSE) { stopifnot(is.logical(leaveoneout)) if(!is.null(dim(values))) stop("Internal error: smoothpointsEngine does not support multidimensional values") #' detect constant values if(diff(range(values, na.rm=TRUE)) == 0) { result <- values attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } validate2Dkernel(kernel) if(is.character(kernel)) kernel <- match2DkernelName(kernel) isgauss <- identical(kernel, "gaussian") ## Handle weights that are meant to be null if(length(weights) == 0) weights <- NULL ## infinite bandwidth if(bandwidth.is.infinite(sigma)) { #' uniform estimate nX <- npoints(x) if(is.null(weights)) weights <- rep(1, nX) wtval <- weights * values totwt <- sum(weights) totwtval <- sum(wtval) denominator <- rep(totwt, nX) numerator <- rep(totwtval, nX) if(leaveoneout) { numerator <- numerator - wtval denominator <- denominator - weights } result <- numerator/denominator return(result) } ## cutoff distance (beyond which the kernel value is treated as zero) ## NB: input argument 'cutoff' is either NULL or ## an absolute distance (if scalekernel=FALSE) ## a number of standard deviations (if scalekernel=TRUE) cutoff <- cutoff2Dkernel(kernel, sigma=sigma, varcov=varcov, scalekernel=scalekernel, cutoff=cutoff, fatal=TRUE) ## cutoff is now an absolute distance if(debug) cat(paste("cutoff=", cutoff, "\n")) # detect very small bandwidth nnd <- nndist(x) nnrange <- range(nnd) if(cutoff < nnrange[1]) { if(leaveoneout && (npoints(x) > 1)) { warning("Very small bandwidth; values of nearest neighbours returned") result <- values[nnwhich(x)] } else { warning("Very small bandwidth; original values returned") result <- values } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov attr(result, "warnings") <- "underflow" return(result) } if(leaveoneout) { # ensure cutoff includes at least one point cutoff <- max(1.1 * nnrange[2], cutoff) } sd <- if(is.null(varcov)) sigma else sqrt(max(eigen(varcov)$values)) if(isgauss && spatstat.options("densityTransform") && spatstat.options("densityC")) { ## .................. experimental C code ..................... 
if(debug) cat('Transforming to standard coordinates (densityTransform=TRUE).\n') npts <- npoints(x) result <- numeric(npts) ## transform to standard coordinates xx <- x$x yy <- x$y if(is.null(varcov)) { xx <- xx/(sqrt(2) * sigma) yy <- yy/(sqrt(2) * sigma) } else { Sinv <- solve(varcov) xy <- cbind(xx, yy) %*% matrixsqrt(Sinv/2) xx <- xy[,1] yy <- xy[,2] sorted <- FALSE } ## cutoff in standard coordinates cutoff <- cutoff/(sqrt(2) * sd) ## sort into increasing order of x coordinate (required by C code) if(!sorted) { oo <- fave.order(xx) xx <- xx[oo] yy <- yy[oo] vv <- values[oo] } else { vv <- values } if(is.null(weights)) { zz <- .C(SE_Gsmoopt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), result = as.double(double(npts)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[oo] <- zz$result } else { wtsort <- weights[oo] zz <- .C(SE_Gwtsmoopt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), weight = as.double(wtsort), result = as.double(double(npts)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[oo] <- zz$result } if(any(nbg <- (is.infinite(result) | is.nan(result)))) { # NaN or +/-Inf can occur if bandwidth is small # Use mark of nearest neighbour (by l'Hopital's rule) result[nbg] <- values[nnwhich(x)[nbg]] } } else if(isgauss && spatstat.options("densityC")) { # .................. C code ........................... if(debug) cat('Using standard code (densityC=TRUE).\n') npts <- npoints(x) result <- numeric(npts) # sort into increasing order of x coordinate (required by C code) if(sorted) { xx <- x$x yy <- x$y vv <- values } else { oo <- fave.order(x$x) xx <- x$x[oo] yy <- x$y[oo] vv <- values[oo] } if(is.null(varcov)) { # isotropic kernel if(is.null(weights)) { zz <- .C(SE_smoopt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sig = as.double(sd), result = as.double(double(npts)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[oo] <- zz$result } else { wtsort <- weights[oo] zz <- .C(SE_wtsmoopt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sig = as.double(sd), weight = as.double(wtsort), result = as.double(double(npts)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[oo] <- zz$result } } else { # anisotropic kernel Sinv <- solve(varcov) flatSinv <- as.vector(t(Sinv)) if(is.null(weights)) { zz <- .C(SE_asmoopt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sinv = as.double(flatSinv), result = as.double(double(npts)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[oo] <- zz$result } else { wtsort <- weights[oo] zz <- .C(SE_awtsmoopt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sinv = as.double(flatSinv), weight = as.double(wtsort), result = as.double(double(npts)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[oo] <- zz$result } } if(any(nbg <- (is.infinite(result) | is.nan(result)))) { # NaN or +/-Inf can occur if bandwidth is small # Use mark of nearest neighbour (by l'Hopital's rule) 
result[nbg] <- values[nnwhich(x)[nbg]] } } else { #' Either a non-Gaussian kernel or using older, partly interpreted code #' compute weighted densities if(debug) cat('Using partly-interpreted code.\n') if(is.null(weights)) { # weights are implicitly equal to 1 numerator <- do.call(density.ppp, resolve.defaults(list(x=quote(x), at="points", weights = quote(values), sigma=sigma, varcov=varcov, leaveoneout=leaveoneout, sorted=sorted, kernel=kernel, scalekernel=scalekernel, cutoff=cutoff), list(...), list(edge=FALSE))) denominator <- do.call(density.ppp, resolve.defaults(list(x=quote(x), at="points", sigma=sigma, varcov=varcov, leaveoneout=leaveoneout, sorted=sorted, kernel=kernel, scalekernel=scalekernel, cutoff=cutoff), list(...), list(edge=FALSE))) } else { values.weights <- values * weights dont.complain.about(values.weights) numerator <- do.call(density.ppp, resolve.defaults(list(x=quote(x), at="points", weights = quote(values.weights), sigma=sigma, varcov=varcov, leaveoneout=leaveoneout, sorted=sorted, kernel=kernel, scalekernel=scalekernel, cutoff=cutoff), list(...), list(edge=FALSE))) denominator <- do.call(density.ppp, resolve.defaults(list(x=quote(x), at="points", weights = quote(weights), sigma=sigma, varcov=varcov, leaveoneout=leaveoneout, sorted=sorted, kernel=kernel, scalekernel=scalekernel, cutoff=cutoff), list(...), list(edge=FALSE))) } if(is.null(uhoh <- attr(numerator, "warnings"))) { result <- numerator/denominator result <- ifelseXB(is.finite(result), result, NA_real_) } else { warning("returning original values") result <- values attr(result, "warnings") <- uhoh } } # pack up and return attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } markmean <- function(X, ...) { stopifnot(is.marked(X)) Y <- Smooth(X, ...) return(Y) } markvar <- function(X, sigma=NULL, ..., weights=NULL, varcov=NULL) { stopifnot(is.marked(X)) if(is.expression(weights)) weights <- eval(weights, envir=as.data.frame(X), enclos=parent.frame()) E1 <- Smooth(X, sigma=sigma, varcov=varcov, weights=weights, ...) X2 <- X %mark% marks(X)^2 ## ensure smoothing bandwidth is the same! sigma <- attr(E1, "sigma") varcov <- attr(E1, "varcov") E2 <- Smooth(X2, sigma=sigma, varcov=varcov, weights=weights, ...) 
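  ## (Conditional variance via the identity Var(M | u) = E(M^2 | u) - {E(M | u)}^2,
  ##  using the same bandwidth for both smooths; evaluated pixelwise below.)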
V <- eval.im(E2 - E1^2) return(V) } bw.smoothppp <- function(X, nh=spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE, kernel="gaussian", varcov1=NULL) { stopifnot(is.ppp(X)) stopifnot(is.marked(X)) if(!is.null(varcov1)) check.nmatrix(varcov1, 2, things="spatial dimensions", mname="varcov1") if(is.function(kernel)) stop("Custom kernel functions are not yet supported in bw.smoothppp") X <- coerce.marks.numeric(X) # rearrange in ascending order of x-coordinate (for C code) X <- X[fave.order(X$x)] # marx <- marks(X) dimmarx <- dim(marx) if(!is.null(dimmarx)) marx <- as.matrix(as.data.frame(marx)) # determine a range of bandwidth values # n <- npoints(X) if(is.null(hmin) || is.null(hmax)) { W <- Window(X) d <- diameter(as.rectangle(W)) # Stoyan's rule of thumb stoyan <- bw.stoyan(X) # rule of thumb based on nearest-neighbour distances nnd <- nndist(unique(X)) if(any(ok <- is.finite(nnd) & (nnd > 0))) { nnd <- nnd[ok] } else { nnd <- d/16 } if(!is.null(varcov1)) { dref <- (det(varcov1))^(1/4) d <- d/dref stoyan <- stoyan/dref nnd <- nnd/dref } if(is.null(hmin)) { hmin <- max(1.1 * min(nnd), stoyan/5) hmin <- min(d/8, hmin) } if(is.null(hmax)) { hmax <- max(stoyan * 20, 3 * mean(nnd), hmin * 2) hmax <- min(d/2, hmax) } } else stopifnot(hmin < hmax) # h <- geomseq(from=hmin, to=hmax, length.out=nh) cv <- numeric(nh) # # compute cross-validation criterion for(i in seq_len(nh)) { if(is.null(varcov1)) { yhat <- Smooth(X, sigma = h[i], at="points", leaveoneout=TRUE, kernel=kernel, sorted=TRUE) } else { yhat <- Smooth(X, varcov = (h[i]^2) * varcov1, at="points", leaveoneout=TRUE, kernel=kernel, sorted=TRUE) } if(!is.null(dimmarx)) yhat <- as.matrix(as.data.frame(yhat)) cv[i] <- mean((marx - yhat)^2) } # optimize result <- bw.optim(cv, h, hname="sigma", creator="bw.smoothppp", criterion="Least Squares Cross-Validation", warnextreme=warn, hargnames=c("hmin", "hmax"), unitname=if(is.null(varcov1)) unitname(X) else NULL, template=varcov1, exponent=2) return(result) } smoothcrossEngine <- function(Xdata, Xquery, values, sigma, ..., weights=NULL, varcov=NULL, kernel="gaussian", scalekernel=is.character(kernel), sorted=FALSE, cutoff=NULL) { validate2Dkernel(kernel) if(is.character(kernel)) kernel <- match2DkernelName(kernel) isgauss <- identical(kernel, "gaussian") && scalekernel if(isTRUE(list(...)$se)) warning("Standard errors are not yet supported", call.=FALSE) if(!is.null(dim(weights))) stop("weights must be a vector") ndata <- npoints(Xdata) nquery <- npoints(Xquery) if(nquery == 0 || ndata == 0) { if(is.null(dim(values))) return(rep(NA_real_, nquery)) nuttin <- matrix(NA_real_, nrow=nquery, ncol=ncol(values)) colnames(nuttin) <- colnames(values) return(nuttin) } # validate weights if(is.matrix(values) || is.data.frame(values)) { k <- ncol(values) stopifnot(nrow(values) == npoints(Xdata)) values <- as.data.frame(values) } else { k <- 1L stopifnot(length(values) == npoints(Xdata) || length(values) == 1) if(length(values) == 1L) values <- rep(values, ndata) } ## infinite bandwidth if(bandwidth.is.infinite(sigma)) { #' uniform estimate if(is.null(weights)) weights <- rep(1, ndata) univariate <- is.null(dim(values)) wtval <- weights * values totwt <- sum(weights) totwtval <- if(univariate) sum(wtval) else colSums(wtval) denominator <- rep(totwt, nquery) numerator <- rep(totwtval, each=nquery) if(!univariate) numerator <- matrix(numerator, nrow=nquery) result <- numerator/denominator if(!univariate) colnames(result) <- colnames(values) return(result) } ## cutoff distance (beyond which the 
kernel value is treated as zero) ## NB: input argument 'cutoff' is either NULL or ## an absolute distance (if scalekernel=FALSE) ## a number of standard deviations (if scalekernel=TRUE) cutoff.orig <- cutoff cutoff <- cutoff2Dkernel(kernel, sigma=sigma, varcov=varcov, scalekernel=scalekernel, cutoff=cutoff, fatal=TRUE) ## cutoff is now an absolute distance ## detect very small bandwidth nnc <- nncross(Xquery, Xdata) if(cutoff < min(nnc$dist)) { if(ndata > 1) { warning("Very small bandwidth; values of nearest neighbours returned") nw <- nnc$which result <- if(k == 1) values[nw] else values[nw,,drop=FALSE] } else { warning("Very small bandwidth; original values returned") result <- values } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov attr(result, "warnings") <- "underflow" return(result) } ## Handle weights that are meant to be null if(length(weights) == 0) weights <- NULL if(!isgauss) { ## .................. non-Gaussian kernel ........................ close <- crosspairs(Xdata, Xquery, cutoff) kerij <- evaluate2Dkernel(kernel, close$dx, close$dy, sigma=sigma, varcov=varcov, scalekernel=scalekernel, ...) ## sum the (weighted) contributions i <- close$i # data point j <- close$j # query point jfac <- factor(j, levels=seq_len(nquery)) wkerij <- if(is.null(weights)) kerij else kerij * weights[i] denominator <- tapplysum(wkerij, list(jfac)) if(k == 1L) { contribij <- wkerij * values[i] numerator <- tapplysum(contribij, list(jfac)) result <- numerator/denominator } else { result <- matrix(, nrow=nquery, ncol=k) for(kk in 1:k) { contribij <- wkerij * values[i, kk] numeratorkk <- tapplysum(contribij, list(jfac)) result[,kk] <- numeratorkk/denominator } } ## trap bad values if(any(nbg <- (is.infinite(result) | is.nan(result)))) { ## NaN or +/-Inf can occur if bandwidth is small ## Use value at nearest neighbour (by l'Hopital's rule) nnw <- nnc$which if(k == 1L) { result[nbg] <- values[nnw[nbg]] } else { bad <- which(nbg, arr.ind=TRUE) badrow <- bad[,"row"] badcol <- bad[,"col"] result[nbg] <- values[cbind(nnw[badrow], badcol)] } } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } ## .................. Gaussian kernel henceforth ........................ ## handle multiple columns of values if(is.matrix(values) || is.data.frame(values)) { k <- ncol(values) stopifnot(nrow(values) == npoints(Xdata)) values <- as.data.frame(values) result <- matrix(, nquery, k) colnames(result) <- colnames(values) if(!sorted) { ood <- fave.order(Xdata$x) Xdata <- Xdata[ood] values <- values[ood, ] ooq <- fave.order(Xquery$x) Xquery <- Xquery[ooq] } for(j in 1:k) result[,j] <- smoothcrossEngine(Xdata, Xquery, values[,j], sigma=sigma, varcov=varcov, weights=weights, kernel=kernel, scalekernel=scalekernel, cutoff=cutoff.orig, sorted=TRUE, ...) 
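    ## query points were sorted by x coordinate above; restore the caller's original ordering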
if(!sorted) { sortresult <- result result[ooq,] <- sortresult } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } ## values must be a vector stopifnot(length(values) == npoints(Xdata) || length(values) == 1) if(length(values) == 1) values <- rep(values, ndata) result <- numeric(nquery) ## coordinates and values xq <- Xquery$x yq <- Xquery$y xd <- Xdata$x yd <- Xdata$y vd <- values if(!sorted) { ## sort into increasing order of x coordinate (required by C code) ooq <- fave.order(Xquery$x) xq <- xq[ooq] yq <- yq[ooq] ood <- fave.order(Xdata$x) xd <- xd[ood] yd <- yd[ood] vd <- vd[ood] } sd <- if(is.null(varcov)) sigma else sqrt(min(eigen(varcov)$values)) if(is.null(varcov)) { ## isotropic kernel if(is.null(weights)) { zz <- .C(SE_crsmoopt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), vd = as.double(vd), rmaxi = as.double(cutoff), sig = as.double(sd), result = as.double(double(nquery)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[ooq] <- zz$result } else { wtsort <- if(sorted) weights else weights[ood] zz <- .C(SE_wtcrsmoopt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), vd = as.double(vd), wd = as.double(wtsort), rmaxi = as.double(cutoff), sig = as.double(sd), result = as.double(double(nquery)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[ooq] <- zz$result } } else { # anisotropic kernel Sinv <- solve(varcov) flatSinv <- as.vector(t(Sinv)) if(is.null(weights)) { zz <- .C(SE_acrsmoopt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), vd = as.double(vd), rmaxi = as.double(cutoff), sinv = as.double(flatSinv), result = as.double(double(nquery)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[ooq] <- zz$result } else { wtsort <- if(sorted) weights else weights[ood] zz <- .C(SE_awtcrsmoopt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), vd = as.double(vd), wd = as.double(wtsort), rmaxi = as.double(cutoff), sinv = as.double(flatSinv), result = as.double(double(nquery)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[ooq] <- zz$result } } if(any(nbg <- (is.infinite(result) | is.nan(result)))) { # NaN or +/-Inf can occur if bandwidth is small # Use mark of nearest neighbour (by l'Hopital's rule) result[nbg] <- values[nnc$which[nbg]] } # pack up and return attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } ExpSmoothLog <- function(X, ..., at=c("pixels", "points"), weights=NULL, se=FALSE) { if(se) stop("Standard errors are not yet supported when geometric=TRUE") verifyclass(X, "ppp") at <- match.arg(at) if(!is.null(weights)) check.nvector(weights, npoints(X), vname="weights") X <- coerce.marks.numeric(X) marx <- marks(X) d <- dim(marx) if(!is.null(d) && d[2] > 1) { switch(at, points = { Z <- lapply(unstack(X), ExpSmoothLog, ..., at=at, weights=weights) Z <- do.call(data.frame, Z) }, pixels = { Z <- solapply(unstack(X), ExpSmoothLog, ..., at=at, weights=weights) }) return(Z) } # vector or single column of numeric marks v <- as.numeric(marx) vmin <- min(v) if(vmin < 0) stop("Negative values in geometric mean smoothing", call.=FALSE) Y <- X %mark% log(v) if(vmin > 0) { Z <- Smooth(Y, ..., at=at, 
weights=weights) } else { yok <- is.finite(marks(Y)) YOK <- Y[yok] weightsOK <- if(is.null(weights)) NULL else weights[yok] switch(at, points = { Z <- rep(-Inf, npoints(X)) Z[yok] <- Smooth(YOK, ..., at=at, weights=weightsOK) }, pixels = { isfinite <- nnmark(Y %mark% yok, ...) support <- solutionset(isfinite) Window(YOK) <- support Z <- as.im(-Inf, W=Window(Y), ...) Z[support] <- Smooth(YOK, ..., at=at, weights=weightsOK)[] }) } return(exp(Z)) } VarOfWtdMean <- function(marx, weights) { ## weighted average totwt <- sum(weights) totwtmark <- sum(weights * marx) Estimate <- totwtmark/totwt ## delta method approximation to variance of weighted average varnum <- sum(weights * marx^2) varden <- totwt covnumden <- totwtmark V <- DeltaMethodVarOfRatio(totwtmark, totwt, varnum, varden, covnumden) return(V) } DeltaMethodVarOfRatio <- function(num, den, varnum, varden, covnumden) { Estimate <- num/den V <- Estimate^2 * ( varnum/num^2 - 2 * covnumden/(num * den) + varden/den^2 ) return(V) } spatstat.explore/R/First.R0000644000176200001440000000062214611073307015207 0ustar liggesusers## spatstat.explore/R/First.R .onLoad <- function(...) reset.spatstat.options() .onAttach <- function(libname, pkgname) { vs <- read.dcf(file=system.file("DESCRIPTION", package="spatstat.explore"), fields="Version") vs <- as.character(vs) putSpatstatVariable("SpatstatExploreVersion", vs) packageStartupMessage(paste("spatstat.explore", vs)) return(invisible(NULL)) } spatstat.explore/R/sharpen.R0000644000176200001440000000406214611073310015554 0ustar liggesusers# # sharpen.R # # $Revision: 1.6 $ $Date: 2013/08/29 03:52:17 $ # sharpen <- function(X, ...) { UseMethod("sharpen") } sharpen.ppp <- function(X, sigma=NULL, ..., varcov=NULL, edgecorrect=FALSE) { stopifnot(is.ppp(X)) Yx <- Smooth(X %mark% X$x, at="points", sigma=sigma, varcov=varcov, edge=TRUE) Yy <- Smooth(X %mark% X$y, at="points", sigma=sigma, varcov=varcov, edge=TRUE) # trap NaN etc nbad <- sum(!(is.finite(Yx) & is.finite(Yy))) if(nbad > 0) stop(paste(nbad, ngettext(nbad, "point is", "points are"), "undefined due to numerical problems;", "smoothing parameter is probably too small")) # W <- as.owin(X) if(edgecorrect) { # convolve x and y coordinate functions with kernel xim <- as.im(function(x,y){x}, W) yim <- as.im(function(x,y){y}, W) xblur <- blur(xim, sigma=sigma, varcov=varcov, normalise=TRUE, ...) yblur <- blur(yim, sigma=sigma, varcov=varcov, normalise=TRUE, ...) # evaluate at data locations xx <- safelookup(xblur, X, warn=FALSE) yy <- safelookup(yblur, X, warn=FALSE) # estimated vector bias of sharpening procedure xbias <- xx - X$x ybias <- yy - X$y # adjust Yx <- Yx - xbias Yy <- Yy - ybias # check this does not place points outside window if(any(uhoh <- !inside.owin(Yx, Yy, W))) { # determine mass of edge effect edgeim <- blur(as.im(W), sigma=sigma, varcov=varcov, normalise=FALSE, ...) 
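      # 'edgeim' gives the kernel mass lying inside the window W around each location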
edg <- safelookup(edgeim, X[uhoh], warn=FALSE) # contract bias correction Yx[uhoh] <- (1 - edg) * X$x[uhoh] + edg * Yx[uhoh] Yy[uhoh] <- (1 - edg) * X$y[uhoh] + edg * Yy[uhoh] } # check again if(any(nbg <- !inside.owin(Yx, Yy, W))) { # give up Yx[nbg] <- X$x[nbg] Yy[nbg] <- X$y[nbg] } } # make point pattern Y <- ppp(Yx, Yy, marks=marks(X), window=W) # tack on smoothing information attr(Y, "sigma") <- sigma attr(Y, "varcov") <- varcov attr(Y, "edgecorrected") <- edgecorrect return(Y) } spatstat.explore/R/smoothfun.R0000644000176200001440000000450414611073310016137 0ustar liggesusers## ## smoothfun.R ## ## Exact 'funxy' counterpart of Smooth.ppp ## ## $Revision: 1.11 $ $Date: 2023/04/01 02:25:41 $ Smoothfun <- function(X, ...) { UseMethod("Smoothfun") } Smoothfun.ppp <- function(X, sigma=NULL, ..., weights=NULL, edge=TRUE, diggle=FALSE) { verifyclass(X, "ppp") if(!is.marked(X, dfok=TRUE)) stop("X should be a marked point pattern") if(isTRUE(list(...)$se)) warning("Standard errors are not yet supported in Smoothfun.ppp", call.=FALSE) ## handle weights now weightsgiven <- !missing(weights) && !is.null(weights) if(weightsgiven) { # convert to numeric if(is.im(weights)) { weights <- safelookup(weights, X) # includes warning if NA } else if(is.expression(weights)) weights <- eval(weights, envir=as.data.frame(X), enclos=parent.frame()) if(length(weights) == 0) weightsgiven <- FALSE } if(weightsgiven) { check.nvector(weights, npoints(X), vname="weights") } else weights <- NULL ## X <- coerce.marks.numeric(X) ## stuff <- list(Xdata=X, values=marks(X), weights=weights, edge=edge, diggle=diggle, ...) ## ## determine smoothing parameters ker <- resolve.2D.kernel(sigma=sigma, ..., x=X, bwfun=bw.smoothppp, allow.zero=TRUE) stuff[c("sigma", "varcov")] <- ker[c("sigma", "varcov")] ## g <- function(x, y=NULL) { Y <- xy.coords(x, y)[c("x", "y")] Xquery <- as.ppp(Y, Window(stuff$Xdata)) do.call(smoothcrossEngine, append(list(Xquery=Xquery), stuff)) } g <- funxy(g, as.rectangle(as.owin(X))) class(g) <- c("Smoothfun", class(g)) return(g) } print.Smoothfun <- function(x, ...) { cat("function(x,y)", "which returns", "values", "interpolated from", fill=TRUE) X <- get("X", envir=environment(x)) print(X, ...) return(invisible(NULL)) } ## Method for as.im ## (enables plot.funxy, persp.funxy, contour.funxy to work for this class) as.im.Smoothfun <- function(X, W=Window(X), ..., approx=TRUE) { stuff <- get("stuff", envir=environment(X)) if(!approx) { #' evaluate exactly at grid points result <- as.im.function(X, W=W, ...) 
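    #' (exact evaluation calls the interpolant at every grid point, which can be slow)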
} else { #' faster, approximate evaluation using FFT if(!is.null(W)) stuff$X <- stuff$X[W] result <- do.call(Smooth, resolve.defaults(list(...), stuff)) } return(result) } spatstat.explore/R/Ksector.R0000644000176200001440000001766414611073307015550 0ustar liggesusers# # Ksector.R Estimation of 'sector K function' # # $Revision: 1.7 $ $Date: 2022/06/27 07:37:57 $ # Ksector <- function(X, begin=0, end=360, ..., units=c("degrees", "radians"), r=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), domain = NULL, ratio=FALSE, verbose=TRUE) { verifyclass(X, "ppp") # rfixed <- !is.null(r) || !is.null(breaks) npts <- npoints(X) W <- Window(X) areaW <- area(W) lambda <- npts/areaW npairs <- npts * (npts - 1) lambda2 <- npairs/(areaW^2) rmaxdefault <- rmax.rule("K", W, lambda) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max if(!is.null(domain)) { domain <- as.owin(domain) stopifnot(is.subset.owin(domain, Window(X))) areaW <- area(domain) } units <- match.arg(units) switch(units, radians = { if(missing(end)) end <- 2 * pi check.1.real(begin) check.1.real(end) check.in.range(begin, c(-pi, 2*pi)) check.in.range(end, c(0, 2*pi)) stopifnot(begin < end) stopifnot((end - begin) <= 2 * pi) BEGIN <- begin END <- end Bname <- simplenumber(begin/pi, "pi") %orifnull% signif(begin, 3) Ename <- simplenumber(end/pi, "pi") %orifnull% signif(end, 3) }, degrees = { check.1.real(begin) check.1.real(end) check.in.range(begin, c(-90, 360)) check.in.range(end, c(0, 360)) stopifnot(begin < end) stopifnot((end - begin) <= 360) if(verbose && (end - begin) <= 2 * pi) warning("Very small interval in degrees: did you mean radians?") BEGIN <- pi* (begin/180) END <- pi * (end/180) Bname <- signif(begin, 3) Ename <- signif(end, 3) }) ## choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", good="good", best="best"), multi=TRUE) # best.wanted <- ("best" %in% correction) ## replace 'good' by the optimal choice for this size of dataset if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) ## retain only corrections that are implemented for the window correction <- implemented.for.K(correction, W$type, correction.given) ## recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) ## labels subscripts <- paste("sector", Bname, Ename, sep=",") ylabel <- paste("K[", subscripts, "]") ylab <- eval(parse(text=paste("quote(", ylabel, ")"))) # ylab <- parse(text=paste("K[sector,", Bname, ",", Ename, "]")) # yexp <- substitute(K[list(sector,B,E)](r), # list(B=Bname, E=Ename)) yexp <- parse(text=paste("K[list(", subscripts, ")]")) fname <- c("K", paste("list", paren(subscripts))) ## this will be the output data frame Kdf <- data.frame(r=r, theo = ((END-BEGIN)/2) * r^2) desc <- c("distance argument r", "theoretical Poisson %s") denom <- npairs K <- ratfv(Kdf, NULL, denom, "r", ylab = ylab, valu = "theo", fmla = NULL, alim = alim, labl = c("r","{%s[%s]^{pois}}(r)"), desc = desc, fname=fname, yexp=yexp, ratio=ratio) ## identify all close pairs rmax <- max(r) close <- as.data.frame(closepairs(X, rmax)) if(!is.null(domain)) { ## restrict to pairs with first point in 'domain' indom <- with(close, 
inside.owin(xi, yi, domain)) close <- close[indom, , drop=FALSE] } ## select pairs in angular range ang <- with(close, atan2(dy, dx)) %% (2*pi) if(BEGIN >= 0) { ## 0 <= begin < end ok <- (BEGIN <= ang) & (ang <= END) } else { ## begin < 0 <= end ok <- (ang >= 2 * pi + BEGIN) | (ang <= END) } close <- close[ok, , drop=FALSE] ## pairwise distances DIJ <- close$d if(any(correction == "none")) { # uncorrected! For demonstration purposes only! wh <- whist(DIJ, breaks$val) # no weights Kun <- cumsum(wh)/(lambda2 * areaW) # uncorrected estimate of K K <- bind.ratfv(K, quotient = data.frame(un=Kun), denominator = npairs, labl = "{hat(%s)[%s]^{un}}(r)", desc = "uncorrected estimate of %s", preferred = "un", ratio=ratio) } if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(X) I <- close$i bI <- b[I] if(!is.null(domain)) b <- b[inside.owin(X, , w=domain)] # apply reduced sample algorithm RS <- Kount(DIJ, bI, b, breaks) if(any(correction == "bord.modif")) { # modified border correction denom.area <- eroded.areas(W, r, subset=domain) Kbm <- RS$numerator/(lambda2 * denom.area) K <- bind.ratfv(K, quotient = data.frame(bord.modif=Kbm), denominator = lambda2 * areaW * denom.area, labl = "{hat(%s)[%s]^{bordm}}(r)", desc = "modified border-corrected estimate of %s", preferred = "bord.modif", ratio=ratio) } if(any(correction == "border")) { Kb <- RS$numerator/(lambda * RS$denom.count) K <- bind.ratfv(K, quotient = data.frame(border=Kb), denominator = npts * RS$denom.count, labl = "{hat(%s)[%s]^{bord}}(r)", desc = "border-corrected estimate of %s", preferred = "border", ratio=ratio) } } if(any(correction == "translate")) { ## Ohser-Stoyan translation correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE) wh <- whist(DIJ, breaks$val, edgewt) Ktrans <- cumsum(wh)/(lambda2 * areaW) h <- diameter(as.rectangle(W))/2 Ktrans[r >= h] <- NA K <- bind.ratfv(K, quotient = data.frame(trans=Ktrans), denominator = npairs, labl = "{hat(%s)[%s]^{trans}}(r)", desc = "translation-corrected estimate of %s", preferred = "trans", ratio=ratio) } if(any(correction == "isotropic")) { ## Ripley isotropic correction XI <- ppp(close$xi, close$yi, window=W, check=FALSE) edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) wh <- whist(DIJ, breaks$val, edgewt) Kiso <- cumsum(wh)/(lambda2 * areaW) h <- diameter(W)/2 Kiso[r >= h] <- NA K <- bind.ratfv(K, quotient = data.frame(iso=Kiso), denominator = npairs, labl = "{hat(%s)[%s]^{iso}}(r)", desc = "Ripley isotropic correction estimate of %s", preferred = "iso", ratio=ratio) } # # default plot will display all edge corrections formula(K) <- . ~ r nama <- rev(colnames(K)) nama <- nama[!(nama %in% c("r", "rip", "ls"))] fvnames(K, ".") <- nama unitname(K) <- unitname(X) # copy to other components if(ratio) K <- conform.ratfv(K) return(K) } spatstat.explore/R/lohboot.R0000644000176200001440000003047114611073310015565 0ustar liggesusers# # lohboot.R # # $Revision: 1.25 $ $Date: 2022/05/23 02:33:06 $ # # Loh's bootstrap CI's for local pcf, local K etc # spatstatLocalFunctionInfo <- function(key) { ## This table has to be built on the fly. TheTable <- list( pcf = list(Global=pcf, Local=localpcf, L=FALSE, inhom=FALSE, indices=0), Kest = list(Global=Kest, Local=localK, L=FALSE, inhom=FALSE, indices=0), Lest = list(Global=Lest, Local=localK, # stet! 
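                        ## 'stet': the local K values are deliberately used here; they are transformed to the L scale later (L=TRUE)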
L=TRUE, inhom=FALSE, indices=0), pcfinhom = list(Global=pcfinhom, Local=localpcfinhom, L=FALSE, inhom=TRUE, indices=0), Kinhom = list(Global=Kinhom, Local=localKinhom, L=FALSE, inhom=TRUE, indices=0), Linhom = list(Global=Linhom, Local=localKinhom, # stet! L=TRUE, inhom=TRUE, indices=0), Kcross = list(Global=Kcross, Local=localKcross, L=FALSE, inhom=FALSE, indices=2), Lcross = list(Global=Lcross, Local=localKcross, # stet! L=TRUE, inhom=FALSE, indices=2), Kdot = list(Global=Kdot, Local=localKdot, L=FALSE, inhom=FALSE, indices=1), Ldot = list(Global=Ldot, Local=localKdot, # stet! L=TRUE, inhom=FALSE, indices=1), Kcross.inhom = list(Global=Kcross.inhom, Local=localKcross.inhom, L=FALSE, inhom=TRUE, indices=2), Lcross.inhom = list(Global=Lcross.inhom, Local=localKcross.inhom, # stet! L=TRUE, inhom=TRUE, indices=2) ) if(length(key) != 1) stop("Argument must be a single character string or function", call.=FALSE) nama <- names(TheTable) pos <- if(is.character(key)) { match(key, nama) } else if(is.function(key)) { match(list(key), lapply(TheTable, getElement, name="Global")) } else NULL if(is.na(pos)) return(NULL) out <- TheTable[[pos]] out$GlobalName <- nama[pos] return(out) } lohboot <- function(X, fun=c("pcf", "Kest", "Lest", "pcfinhom", "Kinhom", "Linhom", "Kcross", "Lcross", "Kdot", "Ldot", "Kcross.inhom", "Lcross.inhom"), ..., block=FALSE, global=FALSE, basicboot=FALSE, Vcorrection=FALSE, confidence=0.95, nx = 4, ny = nx, nsim=200, type=7) { stopifnot(is.ppp(X)) check.1.integer(nsim) stopifnot(nsim > 1) ## validate 'fun' fun.name <- short.deparse(substitute(fun)) if(is.character(fun)) fun <- match.arg(fun) info <- spatstatLocalFunctionInfo(fun) if(is.null(info)) stop(paste("Loh's bootstrap is not supported for the function", sQuote(fun.name)), call.=FALSE) fun <- info$GlobalName localfun <- info$Local # validate confidence level stopifnot(confidence > 0.5 && confidence < 1) alpha <- 1 - confidence if(!global) { probs <- c(alpha/2, 1-alpha/2) rank <- nsim * probs[2L] } else { probs <- 1-alpha rank <- nsim * probs } if(abs(rank - round(rank)) > 0.001) warning(paste("confidence level", confidence, "corresponds to a non-integer rank", paren(rank), "so quantiles will be interpolated")) ## compute local functions f <- localfun(X, ...) theo <- f$theo ## parse edge correction info correction <- attr(f, "correction") switch(correction, none = { ckey <- clab <- "un" cadj <- "uncorrected" }, border = { ckey <- "border" clab <- "bord" cadj <- "border-corrected" }, translate = { ckey <- clab <- "trans" cadj <- "translation-corrected" }, isotropic = { ckey <- clab <- "iso" cadj <- "Ripley isotropic corrected" }) ## determine indices for Kcross etc types <- levels(marks(X)) from <- resolve.1.default(list(from=types[1]), list(...)) to <- resolve.1.default(list(to=types[2]), list(...)) fromName <- make.parseable(paste(from)) toName <- make.parseable(paste(to)) ## TEMPORARY HACK for cross/dot functions. ## Uses a possibly temporary attribute to overwrite X with only "from" points. 
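  ## (the result 'f' of the local cross/dot function carries the sub-pattern of 'from' points as attribute 'Xfrom')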
if(info$indices > 0) { X <- attr(f, "Xfrom") } # first n columns are the local pcfs (etc) for the n points of X n <- npoints(X) y <- as.matrix(as.data.frame(f))[, 1:n] nr <- nrow(y) ## ---------- Modification by Christophe Biscio ----------------- ## (some re-coding by Adrian) if(!block) { ## Adrian's wrong code ## average local statistics ymean <- .rowMeans(y, na.rm=TRUE, nr, n) ## resample ystar <- matrix(, nrow=nr, ncol=nsim) for(i in 1:nsim) { ## resample n points with replacement ind <- sample(n, replace=TRUE) ## average their local statistics ystar[,i] <- .rowMeans(y[,ind], nr, n, na.rm=TRUE) } } else { ## Correct block bootstrap as described by Loh. W <- Window(X) GridTess <- quadrats(boundingbox(W), nx = nx, ny =ny) ## Classify points of X into grid tiles BlockIndex <- tileindex(X$x, X$y, GridTess) ## Use only 'full' blocks if(!is.rectangle(W)) { blocks <- tiles(GridTess) fullblocks <- sapply(blocks, is.subset.owin, B = W) if(sum(fullblocks)<2) stop("Not enough blocks are fully contained in the window", call.=FALSE) warning(paste("For non-rectangular windows,", "only blocks fully contained in the window are used:", paste(sum(fullblocks), "were used and", sum(!fullblocks), "were ignored.") ), call.=FALSE) ## blocks <- blocks[fullblocks] ## adjust classification of points of X indexmap <- cumsum(fullblocks) indexmap[!fullblocks] <- NA BlockIndex <- indexmap[BlockIndex] ## adjust total number of points n <- sum(!is.na(BlockIndex)) BlockFactor <- factor(BlockIndex, levels=unique(indexmap[!is.na(indexmap)])) } else BlockFactor <- factor(BlockIndex) nmarks <- length(levels(BlockFactor)) ## Average the local function values in each block ymarks <- by(t(y), BlockFactor, colSums, na.rm=TRUE, simplify=FALSE) ## Ensure empty data yield zero if(any(isempty <- sapply(ymarks, is.null))) ymarks[isempty] <- rep(list(numeric(nr)), sum(isempty)) ymarks <- as.matrix(do.call(cbind, ymarks)) * nmarks/n ## average all the marks ymean <- .rowMeans(ymarks, na.rm=TRUE, nr, nmarks) ## Average the marks in each block ystar <- matrix(, nrow=nr, ncol=nsim) for(i in 1:nsim) { ## resample nblocks blocks with replacement ind <- sample( nmarks , replace=TRUE) ## average their local function values ystar[,i] <- .rowMeans(ymarks[,ind], nr, nmarks, na.rm=TRUE) } } ## compute quantiles if(!global) { ## pointwise quantiles hilo <- apply(ystar, 1, quantile, probs=probs, na.rm=TRUE, type=type) ## Ripley's K function correction proposed by Loh if(Vcorrection && (fun=="Kest" || fun=="Kinhom")) { Vcov=sqrt(1+2*pi*n*(f$r)^2/area.owin(W)) hilo[1L,] <- ymean+(ymean-hilo[1L,]) / Vcov hilo[2L,] <- ymean+(ymean-hilo[2L,]) / Vcov hilo <- hilo[2:1,] # switch index so hilo[1,] is lower bound basicboot <- FALSE # The basic bootstrap interval is already used. 
Ensure that I do not modify hilo } ## So-called "basic bootstrap interval" proposed in Loh's paper; ## the intervals are asymptotically the same if(basicboot) { hilo[1L,] <- 2*ymean-hilo[1L,] hilo[2L,] <- 2*ymean-hilo[2L,] hilo <- hilo[c(2,1),] # switch index so hilo[1,] is lower bound } } else { ## quantiles of deviation ydif <- sweep(ystar, 1, ymean) ydev <- apply(abs(ydif), 2, max, na.rm=TRUE) crit <- quantile(ydev, probs=probs, na.rm=TRUE, type=type) hilo <- rbind(ymean - crit, ymean + crit) } ## ============= End Modification by Christophe Biscio =================== ## Transform to L function if required if(info$L) { theo <- sqrt(theo/pi) ymean <- sqrt(ymean/pi) hilo <- sqrt(hilo/pi) warn.once("lohbootLfun", "The calculation of confidence intervals for L functions", "in lohboot() has changed in spatstat 1.60-0 and later;", "they are now computed by transforming the confidence intervals", "for the corresponding K functions.") } ## create fv object df <- data.frame(r=f$r, theo=theo, ymean, lo=hilo[1L,], hi=hilo[2L,]) colnames(df)[3L] <- ckey CIlevel <- paste(100 * confidence, "%% confidence", sep="") desc <- c("distance argument r", "theoretical Poisson %s", paste(cadj, "estimate of %s"), paste("lower", CIlevel, "limit for %s"), paste("upper", CIlevel, "limit for %s")) switch(fun, pcf={ fname <- "g" yexp <- ylab <- quote(g(r)) }, Kest={ fname <- "K" yexp <- ylab <- quote(K(r)) }, Lest={ fname <- "L" yexp <- ylab <- quote(L(r)) }, pcfinhom={ fname <- c("g", "inhom") yexp <- ylab <- quote(g[inhom](r)) }, Kinhom={ fname <- c("K", "inhom") yexp <- ylab <- quote(K[inhom](r)) }, Linhom={ fname <- c("L", "inhom") yexp <- ylab <- quote(L[inhom](r)) }, Kcross={ fname <- c("K", paste0("list(", fromName, ",", toName, ")")) ylab <- substitute(K[fra,til](r), list(fra=fromName,til=toName)) yexp <- substitute(K[list(fra,til)](r), list(fra=fromName,til=toName)) }, Lcross={ fname <- c("L", paste0("list(", fromName, ",", toName, ")")) ylab <- substitute(L[fra,til](r), list(fra=fromName,til=toName)) yexp <- substitute(L[list(fra,til)](r), list(fra=fromName,til=toName)) }, Kdot={ fname <- c("K", paste0(fromName, "~ symbol(\"\\267\")")) ylab <- substitute(K[fra ~ dot](r), list(fra=fromName)) yexp <- substitute(K[fra ~ symbol("\267")](r), list(fra=fromName)) }, Ldot={ fname <- c("L", paste0(fromName, "~ symbol(\"\\267\")")) ylab <- substitute(L[fra ~ dot](r), list(fra=fromName)) yexp <- substitute(L[fra ~ symbol("\267")](r), list(fra=fromName)) }, Kcross.inhom={ fname <- c("K", paste0("list(inhom,", fromName, ",", toName, ")")) ylab <- substitute(K[inhom,fra,til](r), list(fra=fromName,til=toName)) yexp <- substitute(K[list(inhom,fra,til)](r), list(fra=fromName,til=toName)) }, Lcross.inhom={ fname <- c("L", paste0("list(inhom,", fromName, ",", toName, ")")) ylab <- substitute(L[inhom,fra,til](r), list(fra=fromName,til=toName)) yexp <- substitute(L[list(inhom,fra,til)](r), list(fra=fromName,til=toName)) }) labl <- c("r", makefvlabel(NULL, NULL, fname, "pois"), makefvlabel(NULL, "hat", fname, clab), makefvlabel(NULL, "hat", fname, "loCI"), makefvlabel(NULL, "hat", fname, "hiCI")) g <- fv(df, "r", ylab=ylab, ckey, , c(0, max(f$r)), labl, desc, fname=fname, yexp=yexp) formula(g) <- . 
~ r fvnames(g, ".") <- c(ckey, "theo", "hi", "lo") fvnames(g, ".s") <- c("hi", "lo") unitname(g) <- unitname(X) g } spatstat.explore/R/bw.abram.ppp.R0000644000176200001440000000350514627320075016416 0ustar liggesusers#' #' bw.abram.ppp.R #' #' Abramson bandwidths for point pattern #' #' $Revision: 1.10 $ $Date: 2024/06/03 10:39:59 $ #' bw.abram.ppp <- function(X, h0, ..., at=c("points", "pixels"), hp=h0, pilot=NULL, trim=5, smoother=density.ppp){ stopifnot(is.ppp(X)) at <- match.arg(at) if(missing(h0) || is.null(h0)) { h0 <- bw.ppl(X) } else { check.1.real(h0) stopifnot(h0 > 0) } check.1.real(trim) stopifnot(trim > 0) pilot.data <- X imwin <- as.im(Window(X), ...) if(is.im(pilot)){ if(!compatible.im(imwin,pilot)) stop("'X' and 'pilot' have incompatible spatial domains", call.=FALSE) #' clip the worst small values away pilot[pilot<=0] <- min(pilot[pilot>0]) } else if(is.ppp(pilot)){ if(!compatible.im(imwin,as.im(Window(pilot), ...))) stop("'X' and 'pilot' have incompatible spatial domains", call.=FALSE) pilot.data <- pilot } else if(!is.null(pilot)) stop("if supplied, 'pilot' must be a pixel image or a point pattern", call.=FALSE) if(!is.im(pilot)) { if(is.character(smoother)) { smoother <- get(smoother, mode="function") } else stopifnot(is.function(smoother)) pilot <- smoother(pilot.data,sigma=hp,positive=TRUE,...) } pilot <- pilot/integral(pilot) # scale to probability density pilotvalues <- safelookup(pilot, pilot.data, warn=FALSE) ## geometric mean re-scaler (Silverman, 1986; ch 5). gamma <- exp(mean(log(pilotvalues[pilotvalues > 0])))^(-0.5) switch(at, points = { pilot.X <- safelookup(pilot,X,warn=FALSE) bw <- h0 * pmin((pilot.X^(-0.5))/gamma,trim) }, pixels = { bw <- eval.im(h0 * pmin((pilot^(-0.5))/gamma, trim)) }) return(bw) } spatstat.explore/R/densityfun.R0000644000176200001440000000544014611073310016305 0ustar liggesusers## ## densityfun.R ## ## Exact 'funxy' counterpart of density.ppp ## ## $Revision: 1.15 $ $Date: 2023/04/01 02:25:53 $ densityfun <- function(X, ...) { UseMethod("densityfun") } densityfun.ppp <- function(X, sigma=NULL, ..., weights=NULL, edge=TRUE, diggle=FALSE) { verifyclass(X, "ppp") ## standard errors are not yet supported in densitycrossEngine if(isTRUE(list(...)$se)) warning("Standard errors are not yet supported in densityfun.ppp", call.=FALSE) ## handle weights now weightsgiven <- !missing(weights) && !is.null(weights) if(weightsgiven) { # convert to numeric if(is.im(weights)) { weights <- safelookup(weights, X) # includes warning if NA } else if(is.expression(weights)) weights <- eval(weights, envir=as.data.frame(X), enclos=parent.frame()) if(length(weights) == 0) weightsgiven <- FALSE } if(weightsgiven) { check.nvector(weights, npoints(X), vname="weights") } else weights <- NULL ## stuff <- list(Xdata=X, weights=weights, edge=edge, diggle=diggle, ...) 
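  ## these arguments are captured in the closure and passed to densitycrossEngine each time 'g' is evaluated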
## ## determine smoothing parameters ker <- resolve.2D.kernel(sigma=sigma, ..., x=X, bwfun=bw.diggle, allow.zero=TRUE) stuff[c("sigma", "varcov")] <- ker[c("sigma", "varcov")] ## g <- function(x, y=NULL, drop=TRUE) { Y <- xy.coords(x, y)[c("x", "y")] W <- Window(stuff$Xdata) ok <- inside.owin(Y, w=W) allgood <- all(ok) if(!allgood) Y <- lapply(Y, "[", i=ok) Xquery <- as.ppp(Y, W) vals <- do.call(densitycrossEngine, append(list(Xquery=Xquery), stuff)) if(allgood || drop) return(vals) ans <- numeric(length(ok)) ans[ok] <- vals ans[!ok] <- NA attr(ans, "sigma") <- attr(vals, "sigma") return(ans) } g <- funxy(g, as.rectangle(as.owin(X))) class(g) <- c("densityfun", class(g)) return(g) } print.densityfun <- function(x, ...) { cat("function(x,y)", "which returns", "kernel estimate of intensity for", fill=TRUE) X <- get("X", envir=environment(x)) print(X, ...) cat("Optional argument:", "drop=TRUE", fill=TRUE) return(invisible(NULL)) } ## Method for as.im ## (enables plot.funxy, persp.funxy, contour.funxy to work for this class) as.im.densityfun <- function(X, W=Window(X), ..., approx=TRUE) { if(!approx) { #' evaluate exactly at grid points using as.im.funxy -> as.im.function result <- as.im.function(X, W=W, ...) } else { #' faster, approximate evaluation using FFT stuff <- get("stuff", envir=environment(X)) Xdata <- stuff[["Xdata"]] otherstuff <- stuff[names(stuff) != "Xdata"] if(!missing(W)) Xdata <- Xdata[W] result <- do.call(density, resolve.defaults(list(x=quote(Xdata)), list(...), otherstuff)) } return(result) } spatstat.explore/R/cdftest.R0000644000176200001440000002614114611073310015552 0ustar liggesusers# # cdftest.R # # $Revision: 2.33 $ $Date: 2023/01/15 03:23:28 $ # # cdf.test <- function(...) { UseMethod("cdf.test") } cdf.test.ppp <- function(X, covariate, test=c("ks", "cvm", "ad"), ..., interpolate=TRUE, jitter=TRUE) { Xname <- short.deparse(substitute(X)) covname <- singlestring(short.deparse(substitute(covariate))) test <- match.arg(test) if(is.character(covariate)) covname <- covariate if(!is.marked(X, dfok=TRUE)) { # unmarked model <- exactppm(X) modelname <- "CSR" } else if(is.multitype(X)) { # multitype mf <- summary(X)$marks$frequency if(all(mf > 0)) { model <- exactppm(X) modelname <- "CSRI" } else { warning("Ignoring marks, because some mark values have zero frequency") X <- unmark(X) model <- exactppm(X) modelname <- "CSR" } } else { # marked - general case X <- unmark(X) warning("marks ignored") model <- exactppm(X) modelname <- "CSR" } dont.complain.about(model) do.call(spatialCDFtest, resolve.defaults(list(model=quote(model), covariate=quote(covariate), test=test), list(interpolate=interpolate, jitter=jitter), list(...), list(modelname=modelname, covname=covname, dataname=Xname))) } ## cdf.test.ppm is moved to spatstat.model ## cdf.test.slrm is moved to spatstat.model #............. 
helper functions ........................# spatialCDFtest <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., dimyx=NULL, eps=NULL, rule.eps=c("adjust.eps", "grow.frame", "shrink.frame"), interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE, modelname=NULL, covname=NULL, dataname=NULL) { ## conduct test based on comparison of CDF's of covariate values test <- match.arg(test) rule.eps <- match.arg(rule.eps) ## compute the essential data fra <- spatialCDFframe(model, covariate, dimyx=dimyx, eps=eps, rule.eps=rule.eps, interpolate=interpolate, jitter=jitter, modelname=modelname, covname=covname, dataname=dataname) ## calculate the test statistic result <- spatialCDFtestCalc(fra, test=test, ...) if(is.poisson(model)) return(result) ## Gibbs model: perform Monte Carlo test result$poisson.p.value <- pobs <- result$p.value result$poisson.statistic <- tobs <- result$statistic Xsim <- simulate(model, nsim=nsim, progress=verbose) sim.pvals <- sim.stats <- numeric(nsim) if(verbose) { cat("Processing.. ") state <- list() } for(i in seq_len(nsim)) { model.i <- update(model, Xsim[[i]]) fra.i <- spatialCDFframe(model.i, covariate, dimyx=dimyx, eps=eps, rule.eps=rule.eps, interpolate=interpolate, jitter=jitter, modelname=modelname, covname=covname, dataname=dataname) res.i <- spatialCDFtestCalc(fra.i, test=test, ..., details=FALSE) sim.pvals[i] <- res.i$p.value sim.stats[i] <- res.i$statistic if(verbose) state <- progressreport(i, nsim, state=state) } if(verbose) cat("Done.\n") result$sim.pvals <- sim.pvals result$sim.stats <- sim.stats ## Monte Carlo p-value ## For tied p-values, first compare values of test statistics ## (because p = 0 may occur due to rounding) ## otherwise resolve ties by randomisation nless <- sum(sim.pvals < pobs) nplus <- sum(sim.pvals == pobs & sim.stats > tobs) nties <- sum(sim.pvals == pobs & sim.stats == tobs) result$p.value <- (nless + nplus + sample(0:nties, 1L))/(nsim+1L) ## modify the 'htest' entries testname <- switch(test, ks="Kolmogorov-Smirnov", cvm="Cramer-Von Mises", ad="Anderson-Darling") result$method <- paste("Monte Carlo spatial", testname, "test", "of Gibbs process in", fra$info$spacename) return(result) } spatialCDFtestCalc <- function(fra, test=c("ks", "cvm", "ad"), ..., details=TRUE) { test <- match.arg(test) values <- fra$values info <- fra$info ## Test uniformity of transformed values U <- values$U result <- switch(test, ks = ks.test(U, "punif", ...), cvm = cvm.test(U, "punif", ...), ad = ad.test(U, "punif", ...)) # shortcut for internal use only if(!details) return(result) ## add a full explanation, internal data, etc. 
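  ## attach a readable description of the method and data, plus the computation frame, to the 'htest' object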
## modify the 'htest' entries csr <- info$csr ispois <- info$ispois modelname <- if(csr) "CSR" else if(ispois) "inhomogeneous Poisson process" else "Gibbs process" testname <- switch(test, ks="Kolmogorov-Smirnov", cvm="Cramer-Von Mises", ad="Anderson-Darling") result$method <- paste("Spatial", testname, "test of", modelname, "in", info$spacename) result$data.name <- paste("covariate", sQuote(singlestring(info$covname)), "evaluated at points of", sQuote(info$dataname), "\n and transformed to uniform distribution under", if(csr) info$modelname else sQuote(info$modelname)) ## include internal data attr(result, "frame") <- fra ## additional class 'cdftest' class(result) <- c("cdftest", class(result)) return(result) } spatialCDFframe <- function(model, covariate, ..., jitter=TRUE, covariateAtPoints=NULL, make.quantile.function=FALSE) { # evaluate CDF of covariate values at data points and at pixels stuff <- spatialCovariateEvidence(model, covariate, ..., jitter=jitter) # extract values <- stuff$values # info <- stuff$info Zvalues <- values$Zvalues lambda <- values$lambda weights <- values$weights ZX <- covariateAtPoints %orifnull% values$ZX # compute empirical cdf of Z values at points of X FZX <- ecdf(ZX) # form weighted cdf of Z values in window wts <- lambda * weights sumwts <- sum(wts) FZ <- ewcdf(Zvalues, wts/sumwts) # Ensure support of cdf includes the range of the data xxx <- knots(FZ) yyy <- FZ(xxx) minZX <- min(ZX, na.rm=TRUE) minxxx <- min(xxx, na.rm=TRUE) if(minxxx > minZX) { xxx <- c(minZX, xxx) yyy <- c(0, yyy) } maxZX <- max(ZX, na.rm=TRUE) maxxxx <- max(xxx, na.rm=TRUE) if(maxxxx < maxZX) { xxx <- c(xxx, maxZX) yyy <- c(yyy, 1) } if(length(xxx) > 1) { ## replace by piecewise linear approximation FZ <- approxfun(xxx, yyy, rule=2) class(FZ) <- c("interpolatedCDF", class(FZ)) } # now apply cdf U <- FZ(ZX) if(jitter) { ## Z values have already been jittered, but this does not guarantee ## that U values are distinct nU <- length(U) U <- U + runif(nU, -1, 1)/max(100, 2*nU) U <- pmax(0, pmin(1, U)) } # pack up stuff$values$FZ <- FZ stuff$values$FZX <- FZX stuff$values$U <- U stuff$values$EN <- sumwts ## integral of intensity = expected number of pts if(make.quantile.function) stuff$values$FZinverse <- quantilefun(FZ) ## right-continuous inverse of FZ class(stuff) <- "spatialCDFframe" return(stuff) } plot.cdftest <- function(x, ..., style=c("cdf", "PP", "QQ"), lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2, do.legend=TRUE) { style <- match.arg(style) fram <- attr(x, "frame") if(!is.null(fram)) { values <- fram$values info <- fram$info } else { # old style values <- attr(x, "prep") info <- attr(x, "info") } # cdf of covariate Z over window FZ <- values$FZ # cdf of covariate values at data points FZX <- values$FZX # blurb covname <- info$covname covdescrip <- switch(covname, x="x coordinate", y="y coordinate", paste("covariate", dQuote(covname))) # plot it switch(style, cdf={ # plot both cdf's superimposed qZ <- get("x", environment(FZ)) pZ <- get("y", environment(FZ)) main <- c(x$method, paste("based on distribution of", covdescrip), paste("p-value=", signif(x$p.value, 4))) do.call(plot.default, resolve.defaults( list(x=qZ, y=pZ, type="l"), list(...), list(lwd=lwd0, col=col0, lty=lty0), list(xlab=info$covname, ylab="probability", main=main))) plot(FZX, add=TRUE, do.points=FALSE, lwd=lwd, col=col, lty=lty) if(do.legend) legend("topleft", c("observed", "expected"), lwd=c(lwd,lwd0), col=c(col2hex(col), col2hex(col0)), lty=c(lty2char(lty),lty2char(lty0))) }, PP={ ## 
plot FZX o (FZ)^{-1} ## y-axis: sample probabilities i/n for i=1, .., n ## x-axis: corresponding reference probabilities P(Z < z_[i]) pX <- get("y", environment(FZX)) qX <- get("x", environment(FZX)) p0 <- FZ(qX) do.call(plot.default, resolve.defaults( list(x=p0, y=pX), list(...), list(col=col), list(xlim=c(0,1), ylim=c(0,1), xlab="Theoretical probability", ylab="Observed probability", main=""))) abline(0,1, lwd=lwd0, col=col0, lty=lty0) }, QQ={ ## plot (FZX)^{-1} o FZ ## x-axis: order statistics z_[i] of values at data points ## y-axis: quantiles of Z on window with probabilities i/n qX <- get("x", environment(FZX)) pX <- get("y", environment(FZX)) FZinverse <- quantilefun(FZ) qZX <- FZinverse(pX) xlab <- paste("Theoretical quantile of", covname) ylab <- paste("Observed quantile of", covname) qZ <- get("x", environment(FZ)) Zrange <- range(qZ, qX, qZX) do.call(plot.default, resolve.defaults( list(x=qZX, y=qX), list(...), list(col=col), list(xlim=Zrange, ylim=Zrange, xlab=xlab, ylab=ylab, main=""))) abline(0,1, lwd=lwd0, col=col0, lty=lty0) }) return(invisible(NULL)) } spatstat.explore/R/pcfFromK.R0000644000176200001440000000517314611073310015627 0ustar liggesusers#' #' pcfFromK.R #' #' Calculate pcf from other estimators of K or Kcross #' #' pcf.fv, pcf.fasp #' #' $Revision: 1.1 $ $Date: 2023/02/19 01:02:57 $ pcf.fasp <- function(X, ..., method="c") { verifyclass(X, "fasp") Y <- X Y$title <- paste("Array of pair correlation functions", if(!is.null(X$dataname)) "for", X$dataname) # go to work on each function for(i in seq_along(X$fns)) { Xi <- X$fns[[i]] PCFi <- pcf.fv(Xi, ..., method=method) Y$fns[[i]] <- PCFi if(is.fv(PCFi)) Y$default.formula[[i]] <- formula(PCFi) } return(Y) } pcf.fv <- local({ callmatched <- function(fun, argue) { formalnames <- names(formals(fun)) formalnames <- formalnames[formalnames != "..."] do.call(fun, argue[names(argue) %in% formalnames]) } pcf.fv <- function(X, ..., method="c") { verifyclass(X, "fv") # extract r and the recommended estimate of K r <- with(X, .x) K <- with(X, .y) alim <- attr(X, "alim") # remove NA's ok <- !is.na(K) K <- K[ok] r <- r[ok] switch(method, a = { ss <- callmatched(smooth.spline, list(x=r, y=K, ...)) dK <- predict(ss, r, deriv=1)$y g <- dK/(2 * pi * r) }, b = { y <- K/(2 * pi * r) y[!is.finite(y)] <- 0 ss <- callmatched(smooth.spline, list(x=r, y=y, ...)) dy <- predict(ss, r, deriv=1)$y g <- dy + y/r }, c = { z <- K/(pi * r^2) z[!is.finite(z)] <- 1 ss <- callmatched(smooth.spline, list(x=r, y=z, ...)) dz <- predict(ss, r, deriv=1)$y g <- (r/2) * dz + z }, d = { z <- sqrt(K) z[!is.finite(z)] <- 0 ss <- callmatched(smooth.spline, list(x=r, y=z, ...)) dz <- predict(ss, r, deriv=1)$y g <- z * dz/(pi * r) }, stop(paste("unrecognised method", sQuote(method))) ) # pack result into "fv" data frame Z <- fv(data.frame(r=r, theo=rep.int(1, length(r)), pcf=g), "r", substitute(g(r), NULL), "pcf", . 
~ r, alim, c("r", "%s[pois](r)", "%s(r)"), c("distance argument r", "theoretical Poisson value of %s", "estimate of %s by numerical differentiation"), fname="g") unitname(Z) <- unitname(X) return(Z) } pcf.fv }) spatstat.explore/R/blur.R0000644000176200001440000000363114611073307015067 0ustar liggesusers# # blur.R # # apply Gaussian blur to an image # # $Revision: 1.25 $ $Date: 2020/11/30 07:16:06 $ # Smooth.im <- function(X, sigma=NULL, ..., kernel="gaussian", normalise=FALSE, bleed=TRUE, varcov=NULL) { blur(X, sigma=sigma, ..., kernel=kernel, normalise=normalise, bleed=bleed, varcov=varcov) } blur <- function(x, sigma=NULL, ..., kernel="gaussian", normalise=FALSE, bleed=TRUE, varcov=NULL) { stopifnot(is.im(x)) # determine smoothing kernel sigma.given <- !is.null(sigma) varcov.given <- !is.null(varcov) if (sigma.given) { stopifnot(is.numeric(sigma)) stopifnot(length(sigma) %in% c(1, 2)) stopifnot(all(sigma > 0)) } if (varcov.given) stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov) == 2) ngiven <- varcov.given + sigma.given switch(ngiven + 1L, { sigma <- (1/8) * min(diff(x$xrange), diff(x$yrange)) }, { if (sigma.given && length(sigma) == 2) varcov <- diag(sigma^2) if (!is.null(varcov)) sigma <- NULL }, { stop(paste("Give only one of the arguments", sQuote("sigma"), "and", sQuote("varcov"))) }) # replace NA's in image raster by zeroes X <- fillNA(x, 0) # convolve with Gaussian Y <- second.moment.calc(X, sigma=sigma, ..., kernel=kernel, varcov=varcov, what="smooth") # if no bleeding, we restrict data to the original boundary if(!bleed) Y$v[is.na(x$v)] <- NA # if(!normalise) return(Y) # normalisation: # convert original image to window (0/1 image) Xone <- x isna <- is.na(x$v) Xone$v[isna] <- 0 Xone$v[!isna] <- 1 # convolve with Gaussian Ydenom <- second.moment.calc(Xone, sigma=sigma, ..., kernel=kernel, varcov=varcov, what="smooth") # normalise Z <- eval.im(Y/Ydenom) return(Z) } spatstat.explore/R/bw.CvL.R0000644000176200001440000000206614611073307015217 0ustar liggesusers#' #' bandwidth selection method of Cronie and Van Lieshout #' #' $Revision: 1.1 $ $Date: 2019/09/30 08:01:20 $ bw.CvL <- function(X, ..., srange=NULL, ns=16, sigma=NULL, warn=TRUE){ stopifnot(is.ppp(X)) W <- Window(X) areaW <- area.owin(W) if(!is.null(sigma)) { stopifnot(is.numeric(sigma) && is.vector(sigma)) ns <- length(sigma) } else { if(!is.null(srange)) check.range(srange) else { nnd <- nndist(X) srange <- c(min(nnd[nnd > 0]), diameter(W)/2) } sigma <- geomseq(from=srange[1L], to=srange[2L], length.out=ns) } cv <- numeric(ns) for(i in 1:ns) { si <- sigma[i] lamx <- density(X, sigma = si, at = "points", leaveoneout = FALSE, edge = FALSE) cv[i] <- ( sum(1/lamx) - areaW )^2 } result <- bw.optim(cv, sigma, iopt=which.min(cv), optimum="min", creator="bw.CvL", criterion="Cronie and van Lieshout", warnextreme=warn, hargnames="srange", unitname=unitname(X)) return(result) } spatstat.explore/R/Gmulti.R0000644000176200001440000001530314611073307015363 0ustar liggesusers# Gmulti.S # # Compute estimates of nearest neighbour distance distribution functions # for multitype point patterns # # S functions: # Gcross G_{ij} # Gdot G_{i\bullet} # Gmulti (generic) # # $Revision: 4.46 $ $Date: 2023/02/28 02:05:42 $ # ################################################################################ "Gcross" <- function(X, i, j, r=NULL, breaks=NULL, ..., correction=c("rs", "km", "han")) { # computes G_{ij} estimates # # X marked point pattern (of class 'ppp') # i,j the two mark values to be compared # # r: (optional) values of 
argument r # breaks: (optional) breakpoints for argument r # X <- as.ppp(X) if(!is.marked(X, dfok=FALSE)) stop(paste("point pattern has no", sQuote("marks"))) stopifnot(is.multitype(X)) # marx <- marks(X, dfok=FALSE) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] # I <- (marx == i) if(sum(I) == 0) stop("No points are of type i") if(i == j){ result <- Gest(X[I], r=r, breaks=breaks, ...) } else { J <- (marx == j) if(sum(J) == 0) stop("No points are of type j") result <- Gmulti(X, I, J, r=r, breaks=breaks, disjoint=FALSE, ..., correction=correction) } result <- rebadge.as.crossfun(result, "G", NULL, i, j) return(result) } "Gdot" <- function(X, i, r=NULL, breaks=NULL, ..., correction=c("km","rs","han")) { # Computes estimate of # G_{i\bullet}(t) = # P( a further point of pattern in B(0,t)| a type i point at 0 ) # # X marked point pattern (of class ppp) # # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # X <- as.ppp(X) if(!is.marked(X)) stop(paste("point pattern has no", sQuote("marks"))) stopifnot(is.multitype(X)) # marx <- marks(X, dfok=FALSE) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) if(sum(I) == 0) stop("No points are of type i") J <- rep.int(TRUE, X$n) # i.e. all points # result <- Gmulti(X, I, J, r, breaks, disjoint=FALSE, ..., correction=correction) result <- rebadge.as.dotfun(result, "G", NULL, i) return(result) } ########## "Gmulti" <- function(X, I, J, r=NULL, breaks=NULL, ..., disjoint=NULL, correction=c("rs", "km", "han")) { # # engine for computing the estimate of G_{ij} or G_{i\bullet} # depending on selection of I, J # # X marked point pattern (of class ppp) # # I,J logical vectors of length equal to the number of points # and identifying the two subsets of points to be # compared. # # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # verifyclass(X, "ppp") W <- X$window npts <- npoints(X) areaW <- area(W) # check I and J I <- ppsubset(X, I, "I") J <- ppsubset(X, J, "J") if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") nI <- sum(I) nJ <- sum(J) if(nI == 0) stop("No points satisfy condition I") if(nJ == 0) stop("No points satisfy condition J") if(is.null(disjoint)) disjoint <- !any(I & J) # choose correction(s) # correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("rs", "km", "han") correction <- pickoption("correction", correction, c(none="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", best="km"), multi=TRUE) # determine breakpoints for r values lamJ <- nJ/areaW rmaxdefault <- rmax.rule("G", W, lamJ) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) # brks <- breaks$val rmax <- breaks$max rvals <- breaks$r zeroes <- numeric(length(rvals)) # initialise fv object df <- data.frame(r=rvals, theo=1-exp(-lamJ * pi * rvals^2)) fname <- c("G", "list(I,J)") Z <- fv(df, "r", quote(G[I,J](r)), "theo", . 
~ r, c(0,rmax), c("r", makefvlabel(NULL, NULL, fname, "pois")), c("distance argument r", "theoretical Poisson %s"), fname=fname, yexp=quote(G[list(I,J)](r))) # "type I to type J" nearest neighbour distances XI <- X[I] XJ <- X[J] if(disjoint) nnd <- nncross(XI, XJ, what="dist") else { seqnp <- seq_len(npts) iX <- seqnp[I] iY <- seqnp[J] nnd <- nncross(XI, XJ, iX, iY, what="dist") } # distance to boundary from each type i point bdry <- bdist.points(XI) # observations o <- pmin.int(nnd,bdry) # censoring indicators d <- (nnd <= bdry) # # calculate estimates if("none" %in% correction) { # UNCORRECTED e.d.f. of nearest neighbour distances: use with care if(npts == 0) edf <- zeroes else { hh <- hist(nnd[nnd <= rmax],breaks=breaks$val,plot=FALSE)$counts edf <- cumsum(hh)/length(nnd) } Z <- bind.fv(Z, data.frame(raw=edf), makefvlabel(NULL, "hat", fname, "raw"), "uncorrected estimate of %s", "raw") } if("han" %in% correction) { # Hanisch style estimator if(npts == 0) G <- zeroes else { # uncensored distances x <- nnd[d] # weights a <- eroded.areas(W, rvals) # calculate Hanisch estimator h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts G <- cumsum(h/a) G <- G/max(G[is.finite(G)]) } # add to fv object Z <- bind.fv(Z, data.frame(han=G), makefvlabel(NULL, "hat", fname, "han"), "Hanisch estimate of %s", "han") # modify recommended plot range attr(Z, "alim") <- range(rvals[G <= 0.9]) } if(any(correction %in% c("rs", "km"))) { # calculate Kaplan-Meier and border correction (Reduced Sample) estimators if(npts == 0) result <- data.frame(rs=zeroes, km=zeroes, hazard=zeroes) else { result <- km.rs(o, bdry, d, breaks) result <- as.data.frame(result[c("rs", "km", "hazard")]) } # add to fv object Z <- bind.fv(Z, result, c(makefvlabel(NULL, "hat", fname, "bord"), makefvlabel(NULL, "hat", fname, "km"), "hazard(r)"), c("border corrected estimate of %s", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function lambda(r)"), "km") # modify recommended plot range attr(Z, "alim") <- range(rvals[result$km <= 0.9]) } nama <- names(Z) fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))]) unitname(Z) <- unitname(X) return(Z) } spatstat.explore/R/auc.R0000644000176200001440000000533314611073307014674 0ustar liggesusers## ## auc.R ## ## Calculate ROC curve or area under it ## ## $Revision: 1.17 $ $Date: 2023/08/15 07:44:11 $ roc <- function(X, ...) { UseMethod("roc") } roc.ppp <- function(X, covariate, ..., high=TRUE) { nullmodel <- exactppm(X) result <- rocData(covariate, nullmodel, ..., high=high) return(result) } rocData <- function(covariate, nullmodel, ..., high=TRUE, p=seq(0, 1, length=1024)) { d <- spatialCDFframe(nullmodel, covariate, ...) U <- d$values$U ec <- if(high) ecdf(1-U) else ecdf(U) if(!missing(p)) { check.nvector(p) stopifnot(min(p) >= 0) stopifnot(max(p) <= 1) if(prod(range(diff(p))) < 0) stop("p should be a monotone sequence") } df <- data.frame(p=p, fobs=ec(p), fnull=p) result <- fv(df, argu="p", ylab=quote(roc(p)), valu="fobs", fmla= . ~ p, desc=c("fraction of area", "observed fraction of points", "expected fraction if no effect"), fname="roc") fvnames(result, ".") <- c("fobs", "fnull") return(result) } rocModel <- function(lambda, nullmodel, ..., high, p=seq(0, 1, length=1024)) { if(!missing(high)) warning("Argument 'high' is ignored when computing ROC for a fitted model") d<- spatialCDFframe(nullmodel, lambda, ...) 
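  ## the fitted intensity 'lambda' plays the role of the spatial covariate here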
U <- d$values$U ec <- ecdf(1-U) if(!missing(p)) { check.nvector(p) stopifnot(min(p) >= 0) stopifnot(max(p) <= 1) if(prod(range(diff(p))) < 0) stop("p should be a monotone sequence") } fobs <- ec(p) FZ <- d$values$FZ FZinverse <- quantilefun.ewcdf(FZ) lambdavalues <- if(is.im(lambda)) lambda[] else unlist(lapply(lambda, "[")) F1Z <- ewcdf(lambdavalues, lambdavalues/sum(lambdavalues)) ftheo <- 1 - F1Z(FZinverse(1-p)) df <- data.frame(p=p, fobs=fobs, ftheo=ftheo, fnull=p) result <- fv(df, argu="p", ylab=quote(roc(p)), valu="fobs", fmla = . ~ p, desc=c("fraction of area", "observed fraction of points", "expected fraction of points", "expected fraction if no effect"), fname="roc") fvnames(result, ".") <- c("fobs", "ftheo", "fnull") return(result) } ## Code for roc.ppm, roc.slrm, roc.kppm is moved to spatstat.model # ...................................................... auc <- function(X, ...) { UseMethod("auc") } auc.ppp <- function(X, covariate, ..., high=TRUE) { d <- spatialCDFframe(exactppm(X), covariate, ...) U <- d$values$U EU <- mean(U) result <- if(high) EU else (1 - EU) return(result) } ## Code for auc.ppm, auc.slrm, auc.kppm is moved to spatstat.model spatstat.explore/R/fv.R0000644000176200001440000014353014611073310014533 0ustar liggesusers## ## ## fv.R ## ## class "fv" of function value objects ## ## $Revision: 1.185 $ $Date: 2024/04/02 01:23:12 $ ## ## ## An "fv" object represents one or more related functions ## of the same argument, such as different estimates of the K function. ## ## It is a data.frame with additional attributes ## ## argu column name of the function argument (typically "r") ## ## valu column name of the recommended function ## ## ylab generic label for y axis e.g. K(r) ## ## fmla default plot formula ## ## alim recommended range of function argument ## ## labl recommended xlab/ylab for each column ## ## desc longer description for each column ## ## unitname name of unit of length for 'r' ## ## shade (optional) column names of upper & lower limits ## of shading - typically a confidence interval ## ## Objects of this class are returned by Kest(), etc ## ################################################################## ## creator fv <- function(x, argu="r", ylab=NULL, valu, fmla=NULL, alim=NULL, labl=names(x), desc=NULL, unitname=NULL, fname=NULL, yexp=ylab) { stopifnot(is.data.frame(x)) ## check arguments stopifnot(is.character(argu)) if(!is.null(ylab)) stopifnot(is.character(ylab) || is.language(ylab)) if(!missing(yexp)) { if(is.null(yexp)) yexp <- ylab else stopifnot(is.language(yexp)) } stopifnot(is.character(valu)) if(!(argu %in% names(x))) stop(paste(sQuote("argu"), "must be the name of a column of x")) if(!(valu %in% names(x))) stop(paste(sQuote("valu"), "must be the name of a column of x")) if(is.null(fmla)) fmla <- paste(valu, "~", argu) else if(inherits(fmla, "formula")) { ## convert formula to string fmla <- flat.deparse(fmla) } else if(!is.character(fmla)) stop(paste(sQuote("fmla"), "should be a formula or a string")) if(missing(alim)) { ## Note: if alim is given as NULL, it is not changed. 
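    ## default 'alim': range of the finite values of the function argument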
argue <- x[[argu]] alim <- range(argue[is.finite(argue)]) } else if(!is.null(alim)) { if(!is.numeric(alim) || length(alim) != 2) stop(paste(sQuote("alim"), "should be a vector of length 2")) } if(!is.character(labl)) stop(paste(sQuote("labl"), "should be a vector of strings")) stopifnot(length(labl) == ncol(x)) if(is.null(desc)) desc <- character(ncol(x)) else { stopifnot(is.character(desc)) stopifnot(length(desc) == ncol(x)) nbg <- is.na(desc) if(any(nbg)) desc[nbg] <- "" } if(!is.null(fname)) stopifnot(is.character(fname) && length(fname) %in% 1:2) ## pack attributes attr(x, "argu") <- argu attr(x, "valu") <- valu attr(x, "ylab") <- ylab attr(x, "yexp") <- yexp attr(x, "fmla") <- fmla attr(x, "alim") <- alim attr(x, "labl") <- labl attr(x, "desc") <- desc attr(x, "units") <- as.unitname(unitname) attr(x, "fname") <- fname attr(x, "dotnames") <- NULL attr(x, "shade") <- NULL ## class(x) <- c("fv", class(x)) return(x) } .Spatstat.FvAttrib <- c( "argu", "valu", "ylab", "yexp", "fmla", "alim", "labl", "desc", "units", "fname", "dotnames", "shade") ## putSpatstatVariable("FvAttrib", .Spatstat.FvAttrib) as.data.frame.fv <- function(x, ...) { stopifnot(is.fv(x)) fva <- .Spatstat.FvAttrib attributes(x)[fva] <- NULL class(x) <- "data.frame" x } #' is.fv() is now defined in spatstat.geom/R/is.R ## as.fv <- function(x) { UseMethod("as.fv") } as.fv.fv <- function(x) x as.fv.data.frame <- function(x) { if(ncol(x) < 2) stop("Need at least 2 columns") return(fv(x, names(x)[1L], , names(x)[2L])) } as.fv.matrix <- function(x) { y <- as.data.frame(x) if(any(bad <- is.na(names(y)))) names(y)[bad] <- paste0("V", which(bad)) return(as.fv.data.frame(y)) } ## other methods for as.fv are described in the files for the relevant classes. vanilla.fv <- function(x) { ## remove everything except basic fv characteristics retain <- c("names", "row.names", .Spatstat.FvAttrib) attributes(x) <- attributes(x)[retain] class(x) <- c("fv", "data.frame") return(x) } print.fv <- local({ maxwords <- function(z, m) { max(0, which(cumsum(nchar(z) + 1) <= m+1)) } usewords <- function(z, n) paste(z[1:n], collapse=" ") print.fv <- function(x, ..., tight=FALSE) { verifyclass(x, "fv") terselevel <- spatstat.options("terse") showlabels <- waxlyrical('space', terselevel) showextras <- waxlyrical('extras', terselevel) nama <- names(x) a <- attributes(x) if(!is.null(ylab <- a$ylab)) { if(is.language(ylab)) ylab <- flat.deparse(ylab) } if(!inherits(x, "envelope")) { splat("Function value object", paren(paste("class", sQuote("fv")))) if(!is.null(ylab)) { xlab <- fvlabels(x, expand=TRUE)[[a$argu]] splat("for the function", xlab, "->", ylab) } } ## Descriptions .. desc <- a$desc ## .. may require insertion of ylab if(!is.null(ylab) && any(grepl("%s", desc))) desc <- sprintf(desc, ylab) ## Labels .. 
labl <- fvlabels(x, expand=TRUE) ## Avoid overrunning text margin maxlinewidth <- options('width')[[1L]] key.width <- max(nchar(nama)) labl.width <- if(showlabels) max(nchar(labl), nchar("Math.label")) else 0 desc.width <- max(nchar(desc), nchar("Description")) fullwidth <- key.width + labl.width + desc.width + 2 if(fullwidth > maxlinewidth && tight) { ## try shortening the descriptions so that it all fits on one line spaceleft <- maxlinewidth - (key.width + labl.width + 2) desc <- truncline(desc, spaceleft) desc.width <- max(nchar(desc), nchar("Description")) fullwidth <- key.width + labl.width + desc.width + 2 } spaceleft <- maxlinewidth - (key.width + 1) if(desc.width > spaceleft) { ## Descriptions need to be truncated to max line width desc <- truncline(desc, spaceleft) desc.width <- max(nchar(desc), nchar("Description")) fullwidth <- key.width + labl.width + desc.width + 2 } if(showextras) { fullwidth <- pmin(maxlinewidth, fullwidth) fullline <- paste0(rep(".", fullwidth), collapse="") cat(fullline, fill=TRUE) } df <- data.frame(Math.label=labl, Description=desc, row.names=nama, stringsAsFactors=FALSE) if(!showlabels) df <- df[,-1L,drop=FALSE] print(df, right=FALSE) ## if(showextras) { cat(fullline, fill=TRUE) splat("Default plot formula: ", flat.deparse(as.formula(a$fmla))) splat("where", dQuote("."), "stands for", commasep(sQuote(fvnames(x, ".")), ", ")) if(length(a$shade)) splat("Columns", commasep(sQuote(a$shade)), "will be plotted as shading (by default)") alim <- a$alim splat("Recommended range of argument", paste0(a$argu, ":"), if(!is.null(alim)) prange(signif(alim, 5)) else "not specified") rang <- signif(range(with(x, .x)), 5) splat("Available range", "of argument", paste0(a$argu, ":"), prange(rang)) ledge <- summary(unitname(x))$legend if(!is.null(ledge)) splat(ledge) } return(invisible(NULL)) } print.fv }) ## manipulating the names in fv objects .Spatstat.FvAbbrev <- c( ".x", ".y", ".s", ".", "*", ".a") ## putSpatstatVariable("FvAbbrev", .Spatstat.FvAbbrev) fvnames <- function(X, a=".") { verifyclass(X, "fv") if(!is.character(a)) stop("argument a must be a character string") if(length(a) != 1) return(lapply(a, function(b, Z) fvnames(Z, b), Z=X)) namesX <- names(X) if(a %in% namesX) return(a) vnames <- setdiff(namesX, attr(X, "argu")) answer <- switch(a, ".y" = attr(X, "valu"), ".x" = attr(X, "argu"), ".s" = attr(X, "shade"), ".a" = vnames, "*" = rev(vnames), "." = attr(X, "dotnames") %orifnull% rev(vnames), { stop(paste("Unrecognised abbreviation", sQuote(a)), call.=FALSE) }) return(answer) } "fvnames<-" <- function(X, a=".", value) { verifyclass(X, "fv") if(!is.character(a) || length(a) > 1) stop(paste("argument", sQuote("a"), "must be a character string")) ## special cases if(a == "." 
&& length(value) == 0) { ## clear the dotnames attr(X, "dotnames") <- NULL return(X) } if(a == ".a" || a == "*") { warning("Nothing changed; use names(X) <- value to change names", call.=FALSE) return(X) } ## validate the names switch(a, ".x"=, ".y"={ if(!is.character(value) || length(value) != 1) stop("value should be a single string") }, ".s"={ if(!is.character(value) || length(value) != 2) stop("value should be a vector of 2 character strings") }, "."={ if(!is.character(value)) stop("value should be a character vector") }, stop(paste("Unrecognised abbreviation", dQuote(a))) ) ## check the names match existing column names tags <- names(X) if(any(nbg <- !(value %in% tags))) stop(paste(ngettext(sum(nbg), "The string", "The strings"), commasep(dQuote(value[nbg])), ngettext(sum(nbg), "does not match the name of any column of X", "do not match the names of any columns of X"))) ## reassign names switch(a, ".x"={ attr(X, "argu") <- value }, ".y"={ attr(X, "valu") <- value }, ".s"={ attr(X, "shade") <- value }, "."={ attr(X, "dotnames") <- value }) return(X) } "names<-.fv" <- function(x, value) { nama <- colnames(x) indx <- which(nama == fvnames(x, ".x")) indy <- which(nama == fvnames(x, ".y")) inds <- which(nama %in% fvnames(x, ".s")) ind. <- which(nama %in% fvnames(x, ".")) ## rename columns of data frame x <- NextMethod("names<-") ## adjust other tags fvnames(x, ".x") <- value[indx] fvnames(x, ".y") <- value[indy] fvnames(x, ".") <- value[ind.] if(length(inds)) fvnames(x, ".s") <- value[inds] namemap <- setNames(lapply(value, as.name), nama) formula(x) <- flat.deparse(eval(substitute(substitute(fom, um), list(fom=as.formula(formula(x)), um=namemap)))) return(x) } fvlabels <- function(x, expand=FALSE) { lab <- attr(x, "labl") if(expand && !is.null(fname <- attr(x, "fname"))) { ## expand plot labels using function name nwanted <- substringcount("%s", lab) ngiven <- length(fname) if(any(0 < nwanted & nwanted < ngiven)) warning("Internal error: fvlabels truncated the function name", call.=FALSE) nlacking <- max(nwanted) - ngiven if(nlacking > 0) { ## pad with blanks fname <- c(fname, rep("", nlacking)) } fnamelist <- as.list(fname) for(i in which(nwanted > 0)) lab[i] <- do.call(sprintf, append(list(lab[i]), fnamelist[1:nwanted[i]])) } ## remove empty space lab <- gsub(" ", "", lab) names(lab) <- names(x) return(lab) } "fvlabels<-" <- function(x, value) { stopifnot(is.fv(x)) stopifnot(is.character(value)) stopifnot(length(value) == ncol(x)) attr(x, "labl") <- value return(x) } flatfname <- function(x) { fn <- if(is.character(x)) x else attr(x, "fname") if(length(fn) > 1) fn <- paste0(fn[1L], "[", paste(fn[-1L], collapse=" "), "]") as.name(fn) } makefvlabel <- function(op=NULL, accent=NULL, fname, sub=NULL, argname="r") { ## de facto standardised label a <- "%s" if(!is.null(accent)) a <- paste0(accent, paren(a)) ## eg hat(%s) if(!is.null(op)) a <- paste0("bold", paren(op), "~", a) ## eg bold(var)~hat(%s) if(is.null(sub)) { if(length(fname) != 1) { a <- paste0(a, "[%s]") a <- paren(a, "{") } } else { if(length(fname) == 1) { a <- paste0(a, paren(sub, "[")) } else { a <- paste0(a, paren("%s", "["), "^", paren(sub, "{")) a <- paren(a, "{") } } a <- paste0(a, paren(argname)) return(a) } fvlabelmap <- local({ magic <- function(x) { subx <- paste("substitute(", x, ", NULL)") out <- try(eval(parse(text=subx)), silent=TRUE) if(inherits(out, "try-error")) out <- as.name(make.names(subx)) out } fvlabelmap <- function(x, dot=TRUE) { labl <- fvlabels(x, expand=TRUE) ## construct mapping from identifiers to 
labels map <- as.list(labl) map <- lapply(map, magic) names(map) <- colnames(x) if(dot) { ## also map "." and ".a" to name of target function if(!is.null(ye <- attr(x, "yexp"))) map <- append(map, list("."=ye, ".a"=ye)) ## map other fvnames to their corresponding labels map <- append(map, list(".x"=map[[fvnames(x, ".x")]], ".y"=map[[fvnames(x, ".y")]])) if(length(fvnames(x, ".s"))) { shex <- unname(map[fvnames(x, ".s")]) shadexpr <- substitute(c(A,B), list(A=shex[[1L]], B=shex[[2L]])) map <- append(map, list(".s" = shadexpr)) } } return(map) } fvlabelmap }) ## map from abbreviations to expressions involving the column names, ## for use in eval(substitute(...)) fvexprmap <- function(x) { dotnames <- fvnames(x, ".") u <- if(length(dotnames) == 1) as.name(dotnames) else as.call(lapply(c("cbind", dotnames), as.name)) ux <- as.name(fvnames(x, ".x")) uy <- as.name(fvnames(x, ".y")) umap <- list(.=u, .a=u, .x=ux, .y=uy) if(length(shnm <- fvnames(x, ".s"))) { shadexpr <- substitute(cbind(A,B), list(A=as.name(shnm[1L]), B=as.name(shnm[2L]))) umap <- append(umap, list(.s = shadexpr)) } return(umap) } fvlegend <- local({ fvlegend <- function(object, elang) { ## Compute mathematical legend(s) for column(s) in fv object ## transformed by language expression 'elang'. ## The expression must already be in 'expanded' form. ## The result is an expression, or expression vector. ## The j-th entry of the vector is an expression for the ## j-th column of function values. ee <- distributecbind(as.expression(elang)) map <- fvlabelmap(object, dot = TRUE) eout <- as.expression(lapply(ee, invokemap, map=map)) return(eout) } invokemap <- function(ei, map) { eval(substitute(substitute(e, mp), list(e = ei, mp = map))) } fvlegend }) bind.fv <- function(x, y, labl=NULL, desc=NULL, preferred=NULL, clip=FALSE) { verifyclass(x, "fv") ax <- attributes(x) if(is.fv(y)) { ## y is already an fv object ay <- attributes(y) if(!identical(ax$fname, ay$fname)) { ## x and y represent different functions ## expand the labels separately fvlabels(x) <- fvlabels(x, expand=TRUE) fvlabels(y) <- fvlabels(y, expand=TRUE) ax <- attributes(x) ay <- attributes(y) } ## check compatibility of 'r' values xr <- ax$argu yr <- ay$argu rx <- x[[xr]] ry <- y[[yr]] if(length(rx) != length(ry)) { if(!clip) stop("fv objects x and y have incompatible domains") # restrict both objects to a common domain ra <- intersect.ranges(range(rx), range(ry)) x <- x[inside.range(rx, ra), ] y <- y[inside.range(ry, ra), ] rx <- x[[xr]] ry <- y[[yr]] } if(length(rx) != length(ry) || max(abs(rx-ry)) > .Machine$double.eps) stop("fv objects x and y have incompatible values of r") ## reduce y to data frame and strip off 'r' values ystrip <- as.data.frame(y) yrpos <- which(colnames(ystrip) == yr) ystrip <- ystrip[, -yrpos, drop=FALSE] ## determine descriptors if(is.null(labl)) labl <- attr(y, "labl")[-yrpos] if(is.null(desc)) desc <- attr(y, "desc")[-yrpos] ## y <- ystrip } else if(is.function(y)) { ## evaluate the function 'y' at the argument values of the first object xvals <- x[[fvnames(x, ".x")]] yvals <- y(xvals) y <- data.frame(y=yvals) } else { ## y is a matrix or data frame y <- as.data.frame(y) } ## check for duplicated column names allnames <- c(colnames(x), colnames(y)) if(any(dup <- duplicated(allnames))) { nbg <- unique(allnames[dup]) nn <- length(nbg) warning(paste("The column", ngettext(nn, "name", "names"), commasep(sQuote(nbg)), ngettext(nn, "was", "were"), "duplicated. 
Unique names were generated")) allnames <- make.names(allnames, unique=TRUE, allow_ = FALSE) colnames(y) <- allnames[ncol(x) + seq_len(ncol(y))] } if(is.null(labl)) labl <- paste("%s[", colnames(y), "](r)", sep="") else if(length(labl) != ncol(y)) stop(paste("length of", sQuote("labl"), "does not match number of columns of y")) if(is.null(desc)) desc <- character(ncol(y)) else if(length(desc) != ncol(y)) stop(paste("length of", sQuote("desc"), "does not match number of columns of y")) if(is.null(preferred)) preferred <- ax$valu xy <- cbind(as.data.frame(x), y) z <- fv(xy, ax$argu, ax$ylab, preferred, ax$fmla, ax$alim, c(ax$labl, labl), c(ax$desc, desc), unitname=unitname(x), fname=ax$fname, yexp=ax$yexp) return(z) } cbind.fv <- function(...) { a <- list(...) n <- length(a) if(n == 0) return(NULL) if(n == 1) { ## single argument - extract it a <- a[[1L]] ## could be an fv object if(is.fv(a)) return(a) ## a is a list of arguments n <- length(a) } ## First argument is template z <- a[[1L]] if(!is.fv(z)) stop("First argument should be an object of class fv") ## Subsequent arguments if(n > 1) { ## save dotnames (only those explicitly given in data -- no 'guessing') dn <- fvnames(z, ".") if(any(isfun <- sapply(a, is.function))) { ## Function(s) will be applied to 'r' values xvals <- z[[fvnames(z, ".x")]] ## Determine variable names using names in cbind call, or default nama <- good.names(names(a), "V", 1:n) } for(i in 2:n) { ai <- a[[i]] if(isfun[i]) { ## apply function to 'r' values yvals <- ai(xvals) ## convert to data frame ai <- data.frame(y=yvals) colnames(ai) <- nama[i] } else if(is.fv(ai)) { ## collect more dotnames dn <- c(dn, fvnames(ai, ".")) } z <- bind.fv(z, ai) } ## reattach dotnames fvnames(z, ".") <- dn } return(z) } collapse.anylist <- collapse.fv <- local({ collapse.fv <- function(object, ..., same=NULL, different=NULL) { if(is.fv(object)) { x <- list(object, ...) } else if(inherits(object, "anylist")) { x <- append(object, list(...)) } else if(is.list(object) && all(sapply(object, is.fv))) { x <- append(object, list(...)) } else stop("Format not understood") if(!all(sapply(x, is.fv))) stop("arguments should be objects of class fv") same <- as.character(same) different <- as.character(different) if(anyDuplicated(c(same, different))) stop(paste("The arguments", sQuote("same"), "and", sQuote("different"), "should not have entries in common")) ## handle function argument xname <- unique(sapply(x, fvnames, a=".x")) if(length(xname) > 1) stop(paste("Objects have different names for the function argument:", commasep(sQuote(xname)))) xalias <- c(xname, ".x") same <- setdiff(same, xalias) different <- setdiff(different, xalias) ## dotnames alldotnames <- unique(unlist(lapply(x, fvnames, a="."))) ## validate either <- c(same, different) if(length(either) == 0) stop(paste("At least one column of function values must be selected", "using the arguments", sQuote("same"), "and/or", sQuote("different"))) mussung <- lapply(x, missingnames, expected=either) nbg <- Reduce(intersect, mussung) if((nbad <- length(nbg)) > 0) stop(paste(ngettext(nbad, "The column", "The columns"), commasep(sQuote(nbg)), ngettext(nbad, "is", "are"), "not present in any of the function objects")) ## .............. same .................................... ## extract the common values nsame <- length(same) if(nsame == 0) { ## Initialise using first object y <- x[[1L]] xname <- fvnames(y, ".x") yname <- fvnames(y, ".y") ## The column of 'preferred values' .y cannot be deleted. ## retain .y for now and delete it later. 
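      ## Illustrative sketch (assumption, not from the source): a typical call is
      ##   K1 <- Kest(runifpoint(50)); K2 <- Kest(runifpoint(50))
      ##   collapse.fv(K1, K2, same = "theo", different = "iso")
      ## which keeps a single shared 'theo' column and one 'iso' column per object.
      ## ('runifpoint' is assumed to come from spatstat.random.)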
z <- y[, c(xname, yname)] } else { ## Find first object that contains same[1L] same1 <- same[1L] j <- min(which(sapply(x, isRecognised, expected=same1))) y <- x[[j]] xname <- fvnames(y, ".x") yname <- fvnames(y, ".y") ## possibly expand abbreviation same[1L] <- same1 <- fvnames(y, same1) if(yname != same1) yname <- fvnames(y, ".y") <- same1 z <- y[, c(xname, yname)] if(nsame > 1) { ## Find objects that contain same[2], ..., for(k in 2:nsame) { samek <- same[k] j <- min(which(sapply(x, isRecognised, expected=samek))) xj <- x[[j]] same[k] <- samek <- fvnames(xj, samek) ## identify relevant column in object xj wanted <- (names(xj) == samek) if(any(wanted)) { y <- as.data.frame(xj)[, wanted, drop=FALSE] desc <- attr(xj, "desc")[wanted] labl <- attr(xj, "labl")[wanted] ## glue onto fv object z <- bind.fv(z, y, labl=labl, desc=desc) } } } } dotnames <- intersect(same, alldotnames) ## .............. different ............................. ## create names for different versions versionnames <- good.names(names(x), "f", seq_along(x)) shortnames <- abbreviate(versionnames, minlength=12) ## now merge the different values if(length(different)) { for(i in seq_along(x)) { ## extract values for i-th object xi <- x[[i]] diffi <- availablenames(xi, different) # which columns are available diffi <- unlist(fvnames(xi, diffi)) # expand abbreviations if used ## identify current position of columns wanted <- (names(xi) %in% diffi) if(any(wanted)) { y <- as.data.frame(xi)[, wanted, drop=FALSE] desc <- attr(xi, "desc")[wanted] labl <- attr(xi, "labl")[wanted] indots <- names(y) %in% alldotnames ## relabel prefix <- shortnames[i] preamble <- versionnames[i] names(y) <- if(ncol(y) == 1) prefix else paste(prefix,names(y),sep="") dotnames <- c(dotnames, names(y)[indots]) ## glue onto fv object z <- bind.fv(z, y, labl=paste(prefix, labl, sep="~"), desc=paste(preamble, desc)) } } } if(length(same) == 0) { ## remove the second column which was retained earlier fvnames(z, ".y") <- names(z)[3L] z <- z[, -2L] } fvnames(z, ".") <- dotnames return(z) } isRecognised <- function(z, expected) { known <- c(names(z), .Spatstat.FvAbbrev) !is.na(match(expected, known)) } missingnames <- function(z, expected) { expected[!isRecognised(z, expected)] } availablenames <- function(z, expected){ expected[isRecognised(z, expected)] } collapse.fv }) ## rename one of the columns of an fv object tweak.fv.entry <- function(x, current.tag, new.labl=NULL, new.desc=NULL, new.tag=NULL) { hit <- (names(x) == current.tag) if(!any(hit)) return(x) ## update descriptions of column i <- min(which(hit)) if(!is.null(new.labl)) attr(x, "labl")[i] <- new.labl if(!is.null(new.desc)) attr(x, "desc")[i] <- new.desc ## adjust column tag if(!is.null(new.tag)) { names(x)[i] <- new.tag ## update dotnames dn <- fvnames(x, ".") if(current.tag %in% dn ) { dn[dn == current.tag] <- new.tag fvnames(x, ".") <- dn } ## if the tweaked column is the preferred value, adjust accordingly if(attr(x, "valu") == current.tag) attr(x, "valu") <- new.tag ## if the tweaked column is the function argument, adjust accordingly if(attr(x, "argu") == current.tag) attr(x, "valu") <- new.tag } return(x) } ## change some or all of the auxiliary text in an fv object rebadge.fv <- function(x, new.ylab, new.fname, tags, new.desc, new.labl, new.yexp=new.ylab, new.dotnames, new.preferred, new.formula, new.tags) { if(!missing(new.ylab)) attr(x, "ylab") <- new.ylab if(!missing(new.yexp) || !missing(new.ylab)) attr(x, "yexp") <- new.yexp if(!missing(new.fname)) attr(x, "fname") <- new.fname 
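  ## Illustrative sketch (assumption): rebadge.fv is mainly used internally to
  ## relabel an existing summary function without changing its values, e.g.
  ##   K   <- Kest(cells)
  ##   Kre <- rebadge.fv(K, new.ylab = quote(K[test](r)), new.fname = c("K", "test"))
  ## changes the y-axis label and the function name used in plot annotation.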
if(!missing(new.desc) || !missing(new.labl) || !missing(new.tags)) { ## replace (some or all entries of) the following desc <- attr(x, "desc") labl <- attr(x, "labl") nama <- names(x) ## specified subset to be replaced if(missing(tags) || is.null(tags)) tags <- nama ## match up m <- match(tags, nama) ok <- !is.na(m) mok <- m[ok] ## replace if(!missing(new.desc)) { desc[mok] <- new.desc[ok] attr(x, "desc") <- desc } if(!missing(new.labl)) { labl[mok] <- new.labl[ok] attr(x, "labl") <- labl } if(!missing(new.tags)) { ## rename columns (using "fvnames<-" to adjust special entries) names(x)[mok] <- new.tags[ok] } } if(!missing(new.dotnames)) fvnames(x, ".") <- new.dotnames if(!missing(new.preferred)) { stopifnot(new.preferred %in% names(x)) attr(x, "valu") <- new.preferred } if(!missing(new.formula)) formula(x) <- new.formula return(x) } ## common invocations to label a function like Kdot or Kcross rebadge.as.crossfun <- function(x, main, sub=NULL, i, j) { i <- make.parseable(paste(i)) j <- make.parseable(paste(j)) if(is.null(sub)) { ## single function name like "K" ylab <- substitute(main[i, j](r), list(main=main, i=i, j=j)) fname <- c(main, paste0("list", paren(paste(i, j, sep=",")))) yexp <- substitute(main[list(i, j)](r), list(main=main, i=i, j=j)) } else { ## subscripted function name like "K[inhom]" ylab <- substitute(main[sub, i, j](r), list(main=main, sub=sub, i=i, j=j)) fname <- c(main, paste0("list", paren(paste(sub, i, j, sep=",")))) yexp <- substitute(main[list(sub, i, j)](r), list(main=main, sub=sub, i=i, j=j)) } labl <- rebadgeLabels(x, fname) y <- rebadge.fv(x, new.ylab=ylab, new.fname=fname, new.yexp=yexp, new.labl=labl) return(y) } rebadge.as.dotfun <- function(x, main, sub=NULL, i) { i <- make.parseable(paste(i)) if(is.null(sub)) { ## single function name like "K" ylab <- substitute(main[i ~ dot](r), list(main=main, i=i)) fname <- c(main, paste0(i, "~symbol(\"\\267\")")) yexp <- substitute(main[i ~ symbol("\267")](r), list(main=main, i=i)) } else { ## subscripted function name like "K[inhom]" ylab <- substitute(main[sub, i ~ dot](r), list(main=main, sub=sub, i=i)) fname <- c(main, paste0("list", paren(paste0(sub, ",", i, "~symbol(\"\\267\")")))) yexp <- substitute(main[list(sub, i ~ symbol("\267"))](r), list(main=main, sub=sub, i=i)) } labl <- rebadgeLabels(x, fname) y <- rebadge.fv(x, new.ylab=ylab, new.fname=fname, new.yexp=yexp, new.labl=labl) return(y) } rebadgeLabels <- function(x, new.fname) { fname <- attr(x, "fname") labl <- attr(x, "labl") if(length(fname) == 1L && length(new.fname) == 2L) { ## Existing function name is unsubscripted like "K" ## New function name is subscripted like "K[inhom]" ## Modify label format strings to accommodate subscripted name new.labl <- gsub("%s[", "{%s[%s]^{", labl, fixed = TRUE) new.labl <- gsub("hat(%s)[", "{hat(%s)[%s]^{", new.labl, fixed = TRUE) argu <- attr(x, "argu") new.labl <- gsub(paste0("](",argu,")"), paste0("}}(", argu, ")"), new.labl, fixed = TRUE) new.labl } else labl } ## even simpler wrapper for rebadge.fv rename.fv <- function(x, fname, ylab, yexp=ylab) { stopifnot(is.fv(x)) stopifnot(is.character(fname) && (length(fname) %in% 1:2)) argu <- fvnames(x, ".x") if(missing(ylab) || is.null(ylab)) ylab <- switch(length(fname), substitute(fn(argu), list(fn=as.name(fname), argu=as.name(argu))), substitute(fn[fsub](argu), list(fn=as.name(fname[1]), fsub=as.name(fname[2]), argu=as.name(argu)))) if(missing(yexp) || is.null(yexp)) yexp <- ylab y <- rebadge.fv(x, new.fname=fname, new.ylab=ylab, new.yexp=yexp) return(y) } ## subset 
extraction operator "[.fv" <- function(x, i, j, ..., drop=FALSE) { igiven <- !missing(i) jgiven <- !missing(j) y <- as.data.frame(x) if(igiven && jgiven) z <- y[i, j, drop=drop] else if(igiven) z <- y[i, , drop=drop] else if(jgiven) z <- y[ , j, drop=drop] else z <- y ## return only the selected values as a data frame or vector. if(drop) return(z) if(!jgiven) selected <- seq_len(ncol(x)) else { nameindices <- seq_along(names(x)) names(nameindices) <- names(x) selected <- as.vector(nameindices[j]) } # validate choice of selected/dropped columns nama <- names(z) argu <- attr(x, "argu") if(!(argu %in% nama)) stop(paste("The function argument", sQuote(argu), "must not be removed")) valu <- attr(x, "valu") if(!(valu %in% nama)) stop(paste("The default column of function values", sQuote(valu), "must not be removed")) # if the plot formula involves explicit mention of dropped columns, # replace it by a generic formula fmla <- as.formula(attr(x, "fmla")) if(!all(variablesinformula(fmla) %in% nama)) fmla <- as.formula(. ~ .x, env=environment(fmla)) ## If range of argument was implicitly changed, adjust "alim" alim <- attr(x, "alim") rang <- range(z[[argu]]) alim <- intersect.ranges(alim, rang, fatal=FALSE) result <- fv(z, argu=attr(x, "argu"), ylab=attr(x, "ylab"), valu=attr(x, "valu"), fmla=fmla, alim=alim, labl=attr(x, "labl")[selected], desc=attr(x, "desc")[selected], unitname=attr(x, "units"), fname=attr(x,"fname"), yexp=attr(x, "yexp")) ## carry over preferred names, if possible dotn <- fvnames(x, ".") fvnames(result, ".") <- dotn[dotn %in% colnames(result)] shad <- fvnames(x, ".s") if(length(shad) && all(shad %in% colnames(result))) fvnames(result, ".s") <- shad return(result) } ## Subset and column replacement methods ## to guard against deletion of columns "[<-.fv" <- function(x, i, j, value) { if(!missing(j)) { ## check for alterations to structure of object if((is.character(j) && !all(j %in% colnames(x))) || (is.numeric(j) && any(j > ncol(x)))) stop("Use bind.fv to add new columns to an object of class fv") if(is.null(value) && missing(i)) { ## column(s) will be removed co <- seq_len(ncol(x)) names(co) <- colnames(x) keepcol <- setdiff(co, co[j]) return(x[ , keepcol, drop=FALSE]) } } NextMethod("[<-") } "$<-.fv" <- function(x, name, value) { j <- which(colnames(x) == name) if(is.null(value)) { ## column will be removed if(length(j) != 0) return(x[, -j, drop=FALSE]) return(x) } if(length(j) == 0) { ## new column df <- data.frame(1:nrow(x), value)[,-1L,drop=FALSE] colnames(df) <- name y <- bind.fv(x, df, desc=paste("Additional variable", sQuote(name))) return(y) } NextMethod("$<-") } ## method for 'formula' formula.fv <- function(x, ...) 
{ attr(x, "fmla") } # new generic "formula<-" <- function(x, ..., value) { UseMethod("formula<-") } "formula<-.fv" <- function(x, ..., value) { if(is.null(value)) value <- paste(fvnames(x, ".y"), "~", fvnames(x, ".x")) else if(inherits(value, "formula")) { ## convert formula to string value <- flat.deparse(value) } else if(!is.character(value)) stop("Assignment value should be a formula or a string") attr(x, "fmla") <- value return(x) } ## method for with() with.fv <- function(data, expr, ..., fun=NULL, enclos=NULL) { if(any(names(list(...)) == "drop")) stop("Outdated argument 'drop' used in with.fv") cl <- short.deparse(sys.call()) verifyclass(data, "fv") if(is.null(enclos)) enclos <- parent.frame() ## convert syntactic expression to 'expression' object # e <- as.expression(substitute(expr)) ## convert syntactic expression to call elang <- substitute(expr) ## map "." etc to names of columns of data datanames <- names(data) xname <- fvnames(data, ".x") yname <- fvnames(data, ".y") ux <- as.name(xname) uy <- as.name(yname) dnames <- intersect(datanames, fvnames(data, ".")) ud <- as.call(lapply(c("cbind", dnames), as.name)) anames <- intersect(datanames, fvnames(data, ".a")) ua <- as.call(lapply(c("cbind", anames), as.name)) if(length(snames <- fvnames(data, ".s"))) { snames <- intersect(datanames, snames) us <- as.call(lapply(c("cbind", snames), as.name)) } else us <- NULL expandelang <- eval(substitute(substitute(ee, list(.=ud, .x=ux, .y=uy, .s=us, .a=ua)), list(ee=elang))) dont.complain.about(ua, ud, us, ux, uy) evars <- all.vars(expandelang) used.dotnames <- evars[evars %in% dnames] ## evaluate expression datadf <- as.data.frame(data) results <- eval(expandelang, as.list(datadf), enclos=enclos) ## -------------------- ## commanded to return numerical values only? if(!is.null(fun) && !fun) return(results) if(!is.matrix(results) && !is.data.frame(results)) { ## result is a vector if(is.null(fun)) fun <- FALSE if(!fun || length(results) != nrow(datadf)) return(results) results <- matrix(results, ncol=1) } else { ## result is a matrix or data frame if(is.null(fun)) fun <- TRUE if(!fun || nrow(results) != nrow(datadf)) return(results) } ## result is a matrix or data frame of the right dimensions ## make a new fv object ## ensure columns of results have names if(is.null(colnames(results))) colnames(results) <- paste("col", seq_len(ncol(results)), sep="") resultnames <- colnames(results) ## get values of function argument xvalues <- datadf[[xname]] ## tack onto result matrix results <- cbind(xvalues, results) colnames(results) <- c(xname, resultnames) results <- data.frame(results) ## check for alteration of column names oldnames <- resultnames resultnames <- colnames(results)[-1L] if(any(resultnames != oldnames)) warning("some column names were illegal and have been changed") ## determine mapping (if any) from columns of output to columns of input namemap <- match(colnames(results), names(datadf)) okmap <- !is.na(namemap) ## Build up fv object ## decide which of the columns should be the preferred value newyname <- if(yname %in% resultnames) yname else resultnames[1L] ## construct default plot formula fmla <- flat.deparse(as.formula(paste(". 
~", xname))) dotnames <- resultnames ## construct description strings desc <- character(ncol(results)) desc[okmap] <- attr(data, "desc")[namemap[okmap]] desc[!okmap] <- paste("Computed value", resultnames[!okmap]) ## function name (fname) and mathematical expression for function (yexp) oldyexp <- attr(data, "yexp") oldfname <- attr(data, "fname") if(is.null(oldyexp)) { fname <- cl yexp <- substitute(f(xname), list(f=as.name(fname), xname=as.name(xname))) } else { ## map 'cbind(....)' to "." for name of function only cb <- paste("cbind(", paste(used.dotnames, collapse=","), ")", sep="") compresselang <- gsub(cb, ".", flat.deparse(expandelang), fixed=TRUE) compresselang <- as.formula(paste(compresselang, "~1"))[[2L]] ## construct mapping using original function name labmap <- fvlabelmap(data, dot=TRUE) labmap[["."]] <- oldyexp yexp <- eval(substitute(substitute(ee, ff), list(ee=compresselang, ff=labmap))) labmap2 <- labmap labmap2[["."]] <- as.name(oldfname) fname <- eval(substitute(substitute(ee, ff), list(ee=compresselang, ff=labmap2))) fname <- paren(flat.deparse(fname)) } ## construct mathematical labels mathlabl <- as.character(fvlegend(data, expandelang)) mathlabl <- gsub("[[:space:]]+", " ", mathlabl) labl <- colnames(results) mathmap <- match(labl, used.dotnames) okmath <- !is.na(mathmap) labl[okmath] <- mathlabl[mathmap[okmath]] ## form fv object and return out <- fv(results, argu=xname, valu=newyname, labl=labl, desc=desc, alim=attr(data, "alim"), fmla=fmla, unitname=unitname(data), fname=fname, yexp=yexp, ylab=yexp) fvnames(out, ".") <- dotnames return(out) } ## integral of fv object integral.fv <- function(f, domain=NULL, ...) { verifyclass(f, "fv") df <- as.data.frame(f) xname <- fvnames(f, ".x") x <- df[,xname] if(!is.null(domain)) { check.range(domain) xr <- range(x) if(!all(inside.range(domain, xr))) warning(paste("domain of integration", prange(domain), "was clipped to the available range of function values", prange(xr)), call.=FALSE) ok <- inside.range(x, domain) df <- df[ok, , drop=FALSE] x <- x[ok] } integrands <- as.matrix(df[, colnames(df) != xname, drop=FALSE]) dx <- diff(x) #' trapezoidal rule wx <- (c(dx, 0) + c(0, dx))/2 ans <- wx %*% integrands return(ans[1,]) } ## Stieltjes integration for fv objects StieltjesCalc.fv <- function(M, f, ...) { verifyclass(M, "fv") ## integration variable argu <- attr(M, "argu") x <- M[[argu]] ## values of integrand fx <- f(x, ...) ## estimates of measure valuenames <- names(M) [names(M) != argu] Mother <- as.data.frame(M)[, valuenames] Mother <- as.matrix(Mother, nrow=nrow(M)) ## increments of measure dM <- apply(Mother, 2, diff) dM <- rbind(dM, 0) ## integrate f(x) dM(x) f.dM <- fx * dM f.dM[!is.finite(f.dM)] <- 0 results <- colSums(f.dM) results <- as.list(results) names(results) <- valuenames return(results) } prefixfv <- function(x, tagprefix="", descprefix="", lablprefix=tagprefix, whichtags=fvnames(x, "*")) { ## attach a prefix to fv information stopifnot(is.fv(x)) att <- attributes(x) relevant <- names(x) %in% whichtags oldtags <- names(x)[relevant] newtags <- paste(tagprefix, oldtags, sep="") newlabl <- paste(lablprefix, att$labl[relevant], sep="") newdesc <- paste(descprefix, att$desc[relevant]) y <- rebadge.fv(x, tags=oldtags, new.desc=newdesc, new.labl=newlabl, new.tags=newtags) return(y) } reconcile.fv <- local({ reconcile.fv <- function(...) { ## reconcile several fv objects by finding the columns they share in common z <- list(...) 
if(!all(unlist(lapply(z, is.fv)))) { if(length(z) == 1 && is.list(z[[1L]]) && all(unlist(lapply(z[[1L]], is.fv)))) z <- z[[1L]] else stop("all arguments should be fv objects") } n <- length(z) if(n <= 1) return(z) ## find columns that are common to all estimates keepcolumns <- names(z[[1L]]) keepvalues <- fvnames(z[[1L]], "*") for(i in 2:n) { keepcolumns <- intersect(keepcolumns, names(z[[i]])) keepvalues <- intersect(keepvalues, fvnames(z[[i]], "*")) } if(length(keepvalues) == 0) stop("cannot reconcile fv objects: they have no columns in common") ## determine name of the 'preferred' column prefs <- unlist(lapply(z, fvnames, a=".y")) prefskeep <- prefs[prefs %in% keepvalues] if(length(prefskeep) > 0) { ## pick the most popular chosen <- unique(prefskeep)[which.max(table(prefskeep))] } else { ## drat - pick a value arbitrarily chosen <- keepvalues[1L] } z <- lapply(z, rebadge.fv, new.preferred=chosen) z <- lapply(z, "[.fv", j=keepcolumns) ## also clip to the same r values rmax <- min(sapply(z, maxrval)) z <- lapply(z, cliprmax, rmax=rmax) return(z) } maxrval <- function(x) { max(with(x, .x)) } cliprmax <- function(x, rmax) { x[ with(x, .x) <= rmax, ] } reconcile.fv }) as.function.fv <- function(x, ..., value=".y", extrapolate=FALSE) { trap.extra.arguments(...) value.orig <- value ## extract function argument xx <- with(x, .x) ## extract all function values yy <- as.data.frame(x)[, fvnames(x, "*"), drop=FALSE] ## determine which value(s) to supply if(!is.character(value)) stop("value should be a string or vector specifying columns of x") if(!all(value %in% colnames(yy))) { expandvalue <- try(fvnames(x, value)) if(!inherits(expandvalue, "try-error")) { value <- expandvalue } else stop("Unable to determine columns of x") } yy <- yy[,value, drop=FALSE] argname <- fvnames(x, ".x") ## determine extrapolation rule (1=NA, 2=most extreme value) stopifnot(is.logical(extrapolate)) stopifnot(length(extrapolate) %in% 1:2) endrule <- 1 + extrapolate ## make function(s) if(length(value) == 1 && !identical(value.orig, "*")) { ## make a single 'approxfun' and return it f <- approxfun(xx, yy[,,drop=TRUE], rule=endrule) ## magic names(formals(f))[1L] <- argname body(f)[[4L]] <- as.name(argname) } else { ## make a list of 'approxfuns' with different function values funs <- lapply(yy, approxfun, x = xx, rule = endrule) ## return a function which selects the appropriate 'approxfun' and executes f <- function(xxxx, what=value) { what <- match.arg(what) funs[[what]](xxxx) } ## recast function definition ## ('any sufficiently advanced technology is ## indistinguishable from magic' -- Arthur C. Clarke) formals(f)[[2L]] <- value names(formals(f))[1L] <- argname ## body(f)[[3L]][[2L]] <- as.name(argname) body(f) <- eval(substitute(substitute(z, list(xxxx=as.name(argname))), list(z=body(f)))) } class(f) <- c("fvfun", class(f)) attr(f, "fname") <- attr(x, "fname") attr(f, "yexp") <- attr(x, "yexp") return(f) } print.fvfun <- function(x, ...) { y <- args(x) yexp <- as.expression(attr(x, "yexp")) body(y) <- as.name(paste("Returns interpolated value of", yexp)) print(y, ...) 
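  ## Illustrative sketch (assumption): an 'fvfun' is created by as.function.fv, e.g.
  ##   K <- Kest(cells)
  ##   g <- as.function(K)   # interpolates the recommended column (here "iso")
  ##   g(0.1)                # estimated K at distance r = 0.1
  ## and printing g displays its argument list together with the note above.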
return(invisible(NULL)) } findcbind <- function(root, depth=0, maxdepth=1000) { ## recursive search through a parse tree to find calls to 'cbind' if(depth > maxdepth) stop("Reached maximum depth") if(length(root) == 1) return(NULL) if(identical(as.name(root[[1L]]), as.name("cbind"))) return(list(numeric(0))) out <- NULL for(i in 2:length(root)) { di <- findcbind(root[[i]], depth+1, maxdepth) if(!is.null(di)) out <- append(out, lapply(di, append, values=i, after=FALSE)) } return(out) } .MathOpNames <- c("+", "-", "*", "/", "^", "%%", "%/%", "&", "|", "!", "==", "!=", "<", "<=", ">=", ">") distributecbind <- local({ distributecbind <- function(x) { ## x is an expression involving a call to 'cbind' ## return a vector of expressions, each obtained by replacing 'cbind(...)' ## by one of its arguments in turn. stopifnot(typeof(x) == "expression") xlang <- x[[1L]] locations <- findcbind(xlang) if(length(locations) == 0) return(x) ## cbind might occur more than once ## check that the number of arguments is the same each time narg <- unique(sapply(locations, nargs.in.expr, e=xlang)) if(length(narg) > 1) return(NULL) out <- NULL if(narg > 0) { for(i in 1:narg) { ## make a version of the expression ## in which cbind() is replaced by its i'th argument fakexlang <- xlang for(loc in locations) { if(length(loc) > 0) { ## usual case: 'loc' is integer vector representing nested index cbindcall <- xlang[[loc]] ## extract i-th argument argi <- cbindcall[[i+1]] ## if argument is an expression, enclose it in parentheses if(length(argi) > 1 && paste(argi[[1L]]) %in% .MathOpNames) argi <- substitute((x), list(x=argi)) ## replace cbind call by its i-th argument fakexlang[[loc]] <- argi } else { ## special case: 'loc' = integer(0) representing xlang itself cbindcall <- xlang ## extract i-th argument argi <- cbindcall[[i+1L]] ## replace cbind call by its i-th argument fakexlang <- cbindcall[[i+1L]] } } ## add to final expression out <- c(out, as.expression(fakexlang)) } } return(out) } nargs.in.expr <- function(loc, e) { n <- if(length(loc) > 0) length(e[[loc]]) else length(e) return(n - 1L) } distributecbind }) ## Form a new 'fv' object as a ratio ratfv <- function(df, numer, denom, ..., ratio=TRUE) { ## Determine y if(!missing(df) && !is.null(df)) { y <- fv(df, ...) num <- NULL } else { ## Compute numer/denom ## Numerator must be a data frame num <- fv(numer, ...) ## Denominator may be a data frame or a constant force(denom) y <- eval.fv(num/denom) ## relabel y <- fv(as.data.frame(y), ...) } if(!ratio) return(y) if(is.null(num)) { ## Compute num = y * denom ## Denominator may be a data frame or a constant force(denom) num <- eval.fv(y * denom) ## ditch labels num <- fv(as.data.frame(num), ...) } ## make denominator an fv object if(is.data.frame(denom)) { den <- fv(denom, ...) } else if(is.numeric(denom)) { ## numeric scalar or vector nd <- length(denom) if(nd != 1 && nd != (ny <- nrow(y))) stop(paste("Length of 'denom'", paren(paste0("=", nd)), "is not equal to length of numerator", paren(paste0("=", ny)))) ## replicate it in all the data columns dendf <- as.data.frame(num) valuecols <- (names(num) != fvnames(num, ".x")) dendf[, valuecols] <- denom den <- fv(dendf, ...) 
} else stop("'denom' should be a data frame, a numeric constant, or a numeric vector") ## tweak the descriptions ok <- (names(y) != fvnames(y, ".x")) attr(num, "desc")[ok] <- paste("numerator of", attr(num, "desc")[ok]) attr(den, "desc")[ok] <- paste("denominator of", attr(den, "desc")[ok]) ## form ratio object y <- rat(y, num, den, check=FALSE) return(y) } ## Tack new column(s) onto a ratio fv object bind.ratfv <- function(x, numerator=NULL, denominator=NULL, labl = NULL, desc = NULL, preferred = NULL, ratio=TRUE, quotient=NULL) { if(ratio && !inherits(x, "rat")) stop("ratio=TRUE is set, but x has no ratio information", call.=FALSE) if(is.null(numerator) && !is.null(denominator) && !is.null(quotient)) numerator <- quotient * denominator if(is.null(denominator) && inherits(numerator, "rat")) { ## extract numerator & denominator from ratio object both <- numerator denominator <- attr(both, "denominator") usenames <- fvnames(both, ".a") numerator <- as.data.frame(both)[,usenames,drop=FALSE] denominator <- as.data.frame(denominator)[,usenames,drop=FALSE] ## labels default to those of ratio object ma <- match(usenames, colnames(both)) if(is.null(labl)) labl <- attr(both, "labl")[ma] if(is.null(desc)) desc <- attr(both, "desc")[ma] } # calculate ratio # The argument 'quotient' is rarely needed # except to avoid 0/0 or to improve accuracy if(is.null(quotient)) quotient <- numerator/denominator # bind new column to x y <- bind.fv(x, quotient, labl=labl, desc=desc, preferred=preferred) if(!ratio) return(y) ## convert scalar denominator to data frame if(!is.data.frame(denominator)) { if(!is.numeric(denominator) || !is.vector(denominator)) stop("Denominator should be a data frame or a numeric vector") nd <- length(denominator) if(nd != 1 && nd != nrow(x)) stop("Denominator has wrong length") dvalue <- denominator denominator <- numerator denominator[] <- dvalue } ## Now fuse with x num <- attr(x, "numerator") den <- attr(x, "denominator") num <- bind.fv(num, numerator, labl=labl, desc=paste("numerator of", desc), preferred=preferred) den <- bind.fv(den, denominator, labl=labl, desc=paste("denominator of", desc), preferred=preferred) y <- rat(y, num, den, check=FALSE) return(y) } conform.ratfv <- function(x) { ## harmonise display properties in components of a ratio stopifnot(inherits(x, "rat"), is.fv(x)) num <- attr(x, "numerator") den <- attr(x, "denominator") formula(num) <- formula(den) <- formula(x) fvnames(num, ".") <- fvnames(den, ".") <- fvnames(x, ".") unitname(num) <- unitname(den) <- unitname(x) attr(x, "numerator") <- num attr(x, "denominator") <- den return(x) } spatstat.explore/R/allstats.R0000644000176200001440000000220514611073307015746 0ustar liggesusers# # # allstats.R # # $Revision: 1.18 $ $Date: 2016/02/11 10:17:12 $ # # allstats <- function(pp, ..., dataname=NULL,verb=FALSE) { # # Function allstats --- to calculate the F, G, K, and J functions # for an unmarked point pattern. 
# verifyclass(pp,"ppp") if(is.marked(pp)) stop("This function is applicable only to unmarked patterns.\n") # estimate F, G and J if(verb) cat("Calculating F, G, J ...") Jout <- do.call.matched(Jest,list(X=pp, ...)) if(verb) cat("ok.\n") # extract F, G and J Fout <- attr(Jout, "F") Gout <- attr(Jout, "G") attr(Jout, "F") <- NULL attr(Jout, "G") <- NULL fns <- list("F function"=Fout, "G function"=Gout, "J function"=Jout) # compute second moment function K if(verb) cat("Calculating K function...") Kout <- do.call.matched(Kest, list(X=pp, ...)) fns <- append(fns, list("K function"=Kout)) if(verb) cat("done.\n") # add title if(is.null(dataname)) dataname <- short.deparse(substitute(pp)) title <- paste("Four summary functions for ", dataname,".",sep="") attr(fns, "title") <- title # fns <- as.anylist(fns) return(fns) } spatstat.explore/R/stienen.R0000644000176200001440000000415214611073310015561 0ustar liggesusers## stienen.R ## ## Stienen diagram with border correction ## ## $Revision: 1.9 $ $Date: 2020/12/19 05:25:06 $ stienen <- function(X, ..., bg="grey", border=list(bg=NULL)) { Xname <- short.deparse(substitute(X)) stopifnot(is.ppp(X)) if(npoints(X) <= 1) { W <- Window(X) dont.complain.about(W) do.call(plot, resolve.defaults(list(x=quote(W)), list(...), list(main=Xname))) return(invisible(NULL)) } d <- nndist(X) b <- bdist.points(X) Y <- X %mark% d observed <- (b >= d) Yobserved <- Y[observed] gp <- union(graphicsPars("symbols"), "lwd") dont.complain.about(Yobserved) do.call.plotfun(plot.ppp, resolve.defaults(list(x=quote(Yobserved), markscale=1), list(...), list(bg=bg), list(main=Xname)), extrargs=gp) if(!identical(border, FALSE)) { if(!is.list(border)) border <- list() Ycensored <- Y[!observed] dont.complain.about(Ycensored) do.call.plotfun(plot.ppp, resolve.defaults(list(x=quote(Ycensored), markscale=1, add=TRUE), border, list(...), list(bg=bg), list(cols=grey(0.5), lwd=2)), extrargs=gp) } return(invisible(NULL)) } stienenSet <- function(X, edge=TRUE) { stopifnot(is.ppp(X)) nnd <- nndist(X) if(!edge) { ok <- bdist.points(X) >= nnd X <- X[ok] nnd <- nnd[ok] } n <- npoints(X) if(n == 0) return(emptywindow(Window(X))) if(n == 1) return(Window(X)) rad <- nnd/2 if(!all(ok <- (rad > 0))) { eps <- min(rad[ok], shortside(Frame(X)))/100 rad <- pmax(rad, eps) } delta <- 2 * pi * max(rad)/128 Z <- disc(rad[1], X[1], delta=delta) for(i in 2:n) Z <- union.owin(Z, disc(rad[i], X[i], delta=delta)) return(Z) } spatstat.explore/R/localK.R0000644000176200001440000001550514611073310015325 0ustar liggesusers# # localK.R Getis-Franklin neighbourhood density function # # $Revision: 1.25 $ $Date: 2019/06/23 06:30:55 $ # # "localL" <- function(X, ..., rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL) { localK(X, wantL=TRUE, rmax = rmax, correction=correction, verbose=verbose, rvalue=rvalue) } "localLinhom" <- function(X, lambda=NULL, ..., rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL, sigma=NULL, varcov=NULL, update=TRUE, leaveoneout=TRUE) { localKinhom(X, lambda=lambda, wantL=TRUE, ..., rmax = rmax, correction=correction, verbose=verbose, rvalue=rvalue, sigma=sigma, varcov=varcov, update=update, leaveoneout=leaveoneout) } "localK" <- function(X, ..., rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL) { verifyclass(X, "ppp") localKengine(X, ..., rmax = rmax, correction=correction, verbose=verbose, rvalue=rvalue) } "localKinhom" <- function(X, lambda=NULL, ..., rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL, sigma=NULL, varcov=NULL, update=TRUE, leaveoneout=TRUE) { 
verifyclass(X, "ppp") a <- resolve.lambda(X, lambda, ..., sigma=sigma, varcov=varcov, update=update, leaveoneout=leaveoneout) result <- localKengine(X, lambda=a$lambda, ..., rmax = rmax, correction=correction, verbose=verbose, rvalue=rvalue) if(a$danger) attr(result, "dangerous") <- a$dangerous return(result) } "localKengine" <- function(X, ..., wantL=FALSE, lambda=NULL, rmax = NULL, correction="Ripley", verbose=TRUE, rvalue=NULL) { npts <- npoints(X) W <- X$window areaW <- area(W) lambda.ave <- npts/areaW lambda1.ave <- (npts - 1)/areaW weighted <- !is.null(lambda) if(is.null(rvalue)) rmaxdefault <- rmax %orifnull% rmax.rule("K", W, lambda.ave) else { stopifnot(is.numeric(rvalue)) stopifnot(length(rvalue) == 1) stopifnot(rvalue >= 0) rmaxdefault <- rvalue } breaks <- handle.r.b.args(NULL, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max correction.given <- !missing(correction) correction <- pickoption("correction", correction, c(none="none", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=FALSE) correction <- implemented.for.K(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # identify all close pairs rmax <- max(r) close <- closepairs(X, rmax) DIJ <- close$d XI <- ppp(close$xi, close$yi, window=W, check=FALSE) I <- close$i if(weighted) { J <- close$j lambdaJ <- lambda[J] weightJ <- 1/lambdaJ } # initialise df <- as.data.frame(matrix(NA, length(r), npts)) labl <- desc <- character(npts) if(verbose) state <- list() switch(correction, none={ # uncorrected! For demonstration purposes only! for(i in 1:npts) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, if(weighted) weightJ[ii] else NULL) # no edge weights df[,i] <- cumsum(wh) icode <- numalign(i, npts) names(df)[i] <- paste("un", icode, sep="") labl[i] <- makefvlabel(NULL, "hat", character(2), icode) desc[i] <- paste("uncorrected estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, npts, state=state) } if(!weighted) df <- df/lambda1.ave }, translate={ # Translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) if(weighted) edgewt <- edgewt * weightJ for(i in 1:npts) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, edgewt[ii]) Ktrans <- cumsum(wh) df[,i] <- Ktrans icode <- numalign(i, npts) names(df)[i] <- paste("trans", icode, sep="") labl[i] <- makefvlabel(NULL, "hat", character(2), icode) desc[i] <- paste("translation-corrected estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, npts, state=state) } if(!weighted) df <- df/lambda1.ave h <- diameter(W)/2 df[r >= h, ] <- NA }, isotropic={ # Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) if(weighted) edgewt <- edgewt * weightJ for(i in 1:npts) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, edgewt[ii]) Kiso <- cumsum(wh) df[,i] <- Kiso icode <- numalign(i, npts) names(df)[i] <- paste("iso", icode, sep="") labl[i] <- makefvlabel(NULL, "hat", character(2), icode) desc[i] <- paste("Ripley isotropic correction estimate of %s", "for point", icode) if(verbose) state <- progressreport(i, npts, state=state) } if(!weighted) df <- df/lambda1.ave h <- diameter(W)/2 df[r >= h, ] <- NA }) # transform values if L required if(wantL) df <- sqrt(df/pi) # return vector of values at r=rvalue, if desired if(!is.null(rvalue)) { nr <- length(r) if(r[nr] != rvalue) stop("Internal error - rvalue not attained") 
return(as.numeric(df[nr,])) } # function value table required # add r and theo if(!wantL) { df <- cbind(df, data.frame(r=r, theo=pi * r^2)) if(!weighted) { fnam <- c("K", "loc") yexp <- ylab <- quote(K[loc](r)) } else { fnam <- c("K", "list(inhom,loc)") ylab <- quote(K[inhom,loc](r)) yexp <- quote(K[list(inhom,loc)](r)) } } else { df <- cbind(df, data.frame(r=r, theo=r)) if(!weighted) { fnam <- c("L", "loc") yexp <- ylab <- quote(L[loc](r)) } else { fnam <- c("L", "list(inhom,loc)") ylab <- quote(L[inhom,loc](r)) yexp <- quote(L[list(inhom,loc)](r)) } } desc <- c(desc, c("distance argument r", "theoretical Poisson %s")) labl <- c(labl, c("r", "{%s[%s]^{pois}}(r)")) # create fv object K <- fv(df, "r", ylab, "theo", , alim, labl, desc, fname=fnam, yexp=yexp) # default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) attr(K, "correction") <- correction return(K) } spatstat.explore/R/bw.ppl.R0000644000176200001440000000443114666525762015345 0ustar liggesusers#' #' bw.ppl.R #' #' Likelihood cross-validation for kernel smoother of point pattern #' #' $Revision: 1.17 $ $Date: 2024/09/06 07:09:13 $ #' bw.ppl <- function(X, ..., srange=NULL, ns=16, sigma=NULL, varcov1=NULL, weights=NULL, shortcut=TRUE, warn=TRUE) { stopifnot(is.ppp(X)) if(!is.null(varcov1)) check.nmatrix(varcov1, 2, things="spatial dimensions", mname="varcov1") if(!is.null(sigma)) { stopifnot(is.numeric(sigma) && is.vector(sigma)) ns <- length(sigma) } else { if(!is.null(srange)) { check.range(srange) } else { ## default rule based on point pattern spacing and window size nnd <- nndist(X) srange <- c(min(nnd[nnd > 0]), diameter(as.owin(X))/2) if(!is.null(varcov1)) { dref <- det(varcov1)^(1/4) srange <- srange/dref } } sigma <- geomseq(from=srange[1L], to=srange[2L], length.out=ns) } cv <- numeric(ns) if(shortcut) { for(i in 1:ns) { if(is.null(varcov1)) { si <- sigma[i] vi <- NULL } else { si <- NULL vi <- (sigma[i]^2) * varcov1 } lamx <- density(X, sigma=si, varcov=vi, at="points", leaveoneout=TRUE, weights=weights, ...) cv[i] <- sum(log(lamx)) } } else { IntLam <- numeric(ns) for(i in 1:ns) { if(is.null(varcov1)) { si <- sigma[i] vi <- NULL } else { si <- NULL vi <- (sigma[i]^2) * varcov1 } lamx <- density(X, sigma=si, varcov=vi, at="points", leaveoneout=TRUE, weights=weights, ...) lam <- density(X, sigma=si, varcov=vi, weights=weights, ...) mu <- integral.im(lam) cv[i] <- sum(log(lamx)) - mu IntLam[i] <- mu } } result <- bw.optim(cv, sigma, iopt=which.max(cv), optimum="max", creator="bw.ppl", criterion="Likelihood Cross-Validation", warnextreme=warn, hargnames="srange", unitname=if(is.null(varcov1)) unitname(X) else NULL, template=varcov1, exponent=2) if(!shortcut) attr(result, "info") <- list(IntegralLambda=IntLam) return(result) } spatstat.explore/R/digestCovariates.R0000644000176200001440000000411014611073310017406 0ustar liggesusers#' #' digestCovariates.R #' #' $Revision: 1.5 $ $Date: 2022/05/10 04:18:15 $ #' is.scov <- function(x) { #' Determines whether x is a valid candidate for a spatial covariate #' A spatial object is OK if it can be coerced to a function if(inherits(x, c("im", "funxy", "owin", "tess", "ssf", "leverage.ppm"))) return(TRUE) #' A function(x,y,...) 
is OK if(is.function(x) && identical(names(formals(x))[1:2], c("x", "y"))) return(TRUE) #' A single character "x" or "y" is OK if(is.character(x) && length(x) == 1 && (x %in% c("x", "y"))) return(TRUE) #' Can't handle input return(FALSE) } ## Assumes each input (besides W) is a single covariate or a list of covariates ## Returns a `solist` with possibly a unitname attribute digestCovariates <- function(..., W = NULL) { x <- list(...) #' Find individual covariates in list valid <- sapply(x, is.scov) covs <- x[valid] #' The remaining entries are assumed to be lists of covariates #' so we unlist them x <- unlist(x[!valid], recursive = FALSE) valid <- sapply(x, is.scov) if(!all(valid)) stop("Couldn't interpret all input as spatial covariates.") covs <- append(covs, x) if(any(needW <- !sapply(covs, is.sob))) { if(is.null(W)){ if(all(needW)) stop("Unable to determine spatial domain for covariates", call.=FALSE) boxes <- lapply(covs[!needW], Frame) W <- do.call(boundingbox, boxes) } else stopifnot(is.owin(W)) } #' Now covs is a list of valid covariates we can loop through covunits <- lapply(covs, unitname) for(i in seq_along(covs)){ covar <- covs[[i]] if(is.character(covar) && length(covar) == 1 && (covar %in% c("x", "y"))) { covar <- if(covar == "x"){ function(x,y) { x } } else{ function(x,y) { y } } } if(is.function(covar)) { if(!inherits(covar, "funxy")) covar <- funxy(f = covar, W = W) covs[[i]] <- covar covunits[[i]] <- unitname(covar) } } covs <- as.solist(covs) attr(covs, "covunits") <- covunits return(covs) } spatstat.explore/R/rat.R0000644000176200001440000000356114611073310014705 0ustar liggesusers# # rat.R # # Ratio objects # # Numerator and denominator are stored as attributes # # $Revision: 1.14 $ $Date: 2022/04/22 01:27:03 $ # rat <- function(ratio, numerator, denominator, check=TRUE) { if(check) { stopifnot(compatible(numerator, denominator)) stopifnot(compatible(ratio, denominator)) } attr(ratio, "numerator") <- numerator attr(ratio, "denominator") <- denominator class(ratio) <- unique(c("rat", class(ratio))) return(ratio) } print.rat <- function(x, ...) { NextMethod("print") cat("[Contains ratio information]\n") return(invisible(NULL)) } compatible.rat <- function(A, B, ...) { NextMethod("compatible") } adjust.ratfv <- function(f, columns=fvnames(f, "*"), numfactor=1, denfactor=1) { stopifnot(is.fv(f)) f[,columns] <- (numfactor/denfactor) * as.data.frame(f)[,columns] if(numfactor != 1 && !is.null(num <- attr(f, "numerator"))) { num[,columns] <- numfactor * as.data.frame(num)[,columns] attr(f, "numerator") <- num } if(denfactor != 1 && !is.null(den <- attr(f, "denominator"))) { den[,columns] <- denfactor * as.data.frame(den)[,columns] attr(f, "denominator") <- den } return(f) } tweak.ratfv.entry <- function(x, ...) { # apply same tweak to function, numerator and denominator. x <- tweak.fv.entry(x, ...) if(!is.null(num <- attr(x, "numerator"))) attr(x, "numerator") <- tweak.fv.entry(num, ...) if(!is.null(den <- attr(x, "denominator"))) attr(x, "denominator") <- tweak.fv.entry(den, ...) return(x) } "[.rat" <- function(x, ...) { if(!is.fv(x)) stop("Not yet implemented for non-fv ratios") num <- attr(x, "numerator") den <- attr(x, "denominator") class(x) <- c("fv", "data.frame") x <- x[...] den <- den[...] num <- num[...] 
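  # Illustrative sketch (assumption): 'rat' objects arise from summary functions
  # computed with ratio=TRUE, e.g.
  #   K <- Kest(cells, ratio = TRUE)
  #   attr(K, "numerator")    # fv object holding the unnormalised numerator
  # and this subset method keeps numerator and denominator in step with the ratio.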
attr(x, "numerator") <- num attr(x, "denominator") <- den class(x) <- unique(c("rat", class(x))) return(x) } spatstat.explore/R/Jinhom.R0000644000176200001440000003374514611073307015360 0ustar liggesusers# # Jinhom.R # # $Revision: 1.22 $ $Date: 2023/04/08 04:07:04 $ # Ginhom <- function(X, lambda=NULL, lmin=NULL, ..., sigma=NULL, varcov=NULL, r=NULL, breaks=NULL, ratio=FALSE, update = TRUE, warn.bias=TRUE, savelambda=FALSE) { stopifnot(is.ppp(X)) npts <- npoints(X) W <- as.owin(X) areaW <- area(W) miss.update <- missing(update) # determine 'r' values rmaxdefault <- rmax.rule("G", W, npts/areaW) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) if(!breaks$even) stop("r values must be evenly spaced") r <- breaks$r rmax <- breaks$max nr <- length(r) dangerous <- "lambda" danger <- TRUE # Intensity values at data points if(is.null(lambda)) { # No intensity data provided danger <- FALSE # Estimate density at points by leave-one-out kernel smoothing lamX <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambdaX <- as.numeric(lamX) # negative or zero values are due to numerical error lambdaX <- pmax.int(lambdaX, .Machine$double.eps) } else { # lambda values provided if(is.im(lambda)) lambdaX <- safelookup(lambda, X) else if(inherits(lambda, c("ppm", "kppm", "dppm", "slrm"))) { if(!requireNamespace("spatstat.model")) stop("The package spatstat.model is required when 'lambda' is a fitted model", call.=FALSE) model <- lambda if(!update || inherits(model, "slrm")) { ## just use intensity of fitted model lambdaX <- predict(lambda, locations=X, type="trend") } else { ## re-fit model to data X model <- update(model, X) lambdaX <- fitted(model, dataonly=TRUE) danger <- FALSE if(miss.update) warn.once(key="Ginhom.update", "The behaviour of Ginhom when lambda is a ppm object", "has changed (in spatstat 1.37-0 and later).", "See help(Ginhom)") } } else if(is.function(lambda)) lambdaX <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) { check.nvector(lambda, npts, vname="lambda") lambdaX <- lambda } else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) # negative values are illegal minX <- min(lambdaX) if(minX < 0) stop("Negative values of lambda were encountered at data points") if(minX == 0) stop("Zero values of lambda were encountered at data points") } # Minimum intensity if(!is.null(lmin)) { check.1.real(lmin) stopifnot(lmin >= 0) if(lmin >= min(lambdaX)) stop("lmin must be smaller than all values of lambda") } else { # Compute minimum value over window if(is.null(lambda)) { # extract previously selected smoothing bandwidth sigma <- attr(lamX, "sigma") varcov <- attr(lamX, "varcov") # estimate density on a pixel grid and minimise lam <- density(X, ..., sigma=sigma, varcov=varcov, at="pixels") lmin <- min(lam) # negative or zero values may occur due to numerical error lmin <- max(lmin, .Machine$double.eps) } else { if(is.im(lambda)) lmin <- min(lambda) else if(inherits(lambda, c("ppm", "kppm", "dppm", "slrm"))) lmin <- min(predict(lambda)) else if(is.function(lambda)) lmin <- min(as.im(lambda, W)) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) lmin <- min(lambdaX) } if(lmin < 0) stop("Negative values of intensity encountered") # ensure lmin < lambdaX lmin <- min(lmin, 0.95 * min(lambdaX)) } ## Compute intensity factor lratio <- lmin/lambdaX vv <- 1 - lratio if(warn.bias) { ra <- range(lratio) if(ra[1] < 1e-6 || ra[2] > 1 - 1e-6) warning(paste("Possible bias: range of values of 
lmin/lambdaX is", prange(signif(ra, 5))), call.=FALSE) } ## sort data points in order of increasing x coordinate xx <- X$x yy <- X$y oX <- fave.order(xx) xord <- xx[oX] yord <- yy[oX] vord <- vv[oX] # compute local cumulative products z <- .C(SE_locprod, n = as.integer(npts), x = as.double(xord), y = as.double(yord), v = as.double(vord), nr = as.integer(nr), rmax = as.double(rmax), ans = as.double(numeric(npts * nr)), PACKAGE="spatstat.explore") ans <- matrix(z$ans, nrow=nr, ncol=npts) # revert to original ordering loccumprod <- matrix(, nrow=nr, ncol=npts) loccumprod[, oX] <- ans # border correction bX <- bdist.points(X) ok <- outer(r, bX, "<=") denom <- .rowSums(ok, nr, npts) loccumprod[!ok] <- 0 numer <- .rowSums(loccumprod, nr, npts) # pack up Gdf <- data.frame(r=r, theo = 1 - exp(- lmin * pi * r^2)) desc <- c("distance argument r", "theoretical Poisson %s") theo.denom <- rep.int(npts, nr) G <- ratfv(Gdf, NULL, theo.denom, "r", quote(G[inhom](r)), "theo", NULL, c(0,rmax), c("r", "{%s[%s]^{pois}}(r)"), desc, fname=c("G", "inhom"), ratio=ratio) G <- bind.ratfv(G, data.frame(bord=denom-numer), denom, "{hat(%s)[%s]^{bord}}(r)", "border estimate of %s", "bord", ratio=ratio) # formula(G) <- . ~ r fvnames(G, ".") <- c("bord", "theo") unitname(G) <- unitname(X) if(ratio) G <- conform.ratfv(G) if(danger) attr(G, "dangerous") <- dangerous if(savelambda) { attr(G, "lambda") <- lambdaX attr(G, "lmin") <- lmin } return(G) } Finhom <- function(X, lambda=NULL, lmin=NULL, ..., sigma=NULL, varcov=NULL, r=NULL, breaks=NULL, ratio=FALSE, update = TRUE, warn.bias=TRUE, savelambda=FALSE) { stopifnot(is.ppp(X)) npts <- npoints(X) W <- as.owin(X) areaW <- area(W) miss.update <- missing(update) dotargs <- list(...) eps <- dotargs$eps rorbgiven <- !is.null(r) || !is.null(breaks) checkspacing <- !isFALSE(dotargs$checkspacing) testme <- isTRUE(dotargs$testme) ## determine 'r' values rmaxdefault <- rmax.rule("F", W, npts/areaW) breaks <- handle.r.b.args(r, breaks, W, eps, rmaxdefault=rmaxdefault) if(!breaks$even) stop("r values must be evenly spaced") r <- breaks$r rmax <- breaks$max nr <- length(r) ## check spacing of 'r' values? 
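  ## When r or breaks were supplied by the user, verify that the grid of
  ## r values is fine enough for the cumulative calculation below;
  ## a grid that is too coarse triggers a fatal error (action="fatal")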
if(testme || (rorbgiven && checkspacing)) check.finespacing(r, if(is.null(eps)) NULL else eps/4, as.mask(W, eps=eps), rmaxdefault=rmaxdefault, action="fatal", rname="r", context="in Finhom(X, r)") ## Determine intensity dangerous <- "lambda" danger <- TRUE # Intensity values at data points if(is.null(lambda)) { # No intensity data provided danger <- FALSE # Estimate density at points by leave-one-out kernel smoothing lamX <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambdaX <- as.numeric(lamX) # negative or zero values are due to numerical error lambdaX <- pmax.int(lambdaX, .Machine$double.eps) } else { # lambda values provided if(is.im(lambda)) lambdaX <- safelookup(lambda, X) else if(inherits(lambda, c("ppm", "kppm", "dppm", "slrm"))) { if(!requireNamespace("spatstat.model")) stop("The package spatstat.model is required when 'lambda' is a fitted model", call.=FALSE) model <- lambda if(!update || inherits(model, "slrm")) { ## just use intensity of fitted model lambdaX <- predict(lambda, locations=X, type="trend") } else { ## re-fit model to data X model <- update(model, X) lambdaX <- fitted(model, dataonly=TRUE) danger <- FALSE if(miss.update) warn.once(key="Finhom.update", "The behaviour of Finhom when lambda is a ppm object", "has changed (in spatstat 1.37-0 and later).", "See help(Finhom)") } } else if(is.function(lambda)) lambdaX <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) { check.nvector(lambda, npts, vname="lambda") lambdaX <- lambda } else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) # negative values are illegal minX <- min(lambdaX) if(minX < 0) stop("Negative values of lambda were encountered at data points") if(minX == 0) stop("Zero values of lambda were encountered at data points") } # Minimum intensity if(!is.null(lmin)) { check.1.real(lmin) stopifnot(lmin >= 0) if(lmin >= min(lambdaX)) stop("lmin must be smaller than all values of lambda") } else { # Compute minimum value over window if(is.null(lambda)) { # extract previously selected smoothing bandwidth sigma <- attr(lamX, "sigma") varcov <- attr(lamX, "varcov") # estimate density on a pixel grid and minimise lam <- density(X, ..., sigma=sigma, varcov=varcov, at="pixels") lmin <- min(lam) # negative or zero values may occur due to numerical error lmin <- max(lmin, .Machine$double.eps) } else { if(is.im(lambda)) lmin <- min(lambda) else if(inherits(lambda, c("ppm", "kppm", "dppm", "slrm"))) lmin <- min(predict(lambda)) else if(is.function(lambda)) lmin <- min(as.im(lambda, W)) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) lmin <- min(lambdaX) } if(lmin < 0) stop("Negative values of intensity encountered") # ensure lmin < lambdaX lmin <- min(lmin, 0.95 * min(lambdaX)) } # Compute intensity factor lratio <- lmin/lambdaX vv <- 1 - lratio if(warn.bias) { ra <- range(lratio) if(ra[1] < 1e-6 || ra[2] > 1 - 1e-6) warning(paste("Possible bias: range of values of lmin/lambdaX is", prange(signif(ra, 5))), call.=FALSE) } ## sort data points in order of increasing x coordinate xx <- X$x yy <- X$y oX <- fave.order(xx) xord <- xx[oX] yord <- yy[oX] vord <- vv[oX] # determine pixel grid and compute distance to boundary M <- do.call.matched(as.mask, append(list(w=W), list(...))) bM <- bdist.pixels(M, style="matrix") bM <- as.vector(bM) # x, y coordinates of pixels are already sorted by increasing x xM <- as.vector(rasterx.mask(M)) yM <- as.vector(rastery.mask(M)) nM <- length(xM) # compute local cumulative products z <- 
.C(SE_locxprod, ntest = as.integer(nM), xtest = as.double(xM), ytest = as.double(yM), ndata = as.integer(npts), xdata = as.double(xord), ydata = as.double(yord), vdata = as.double(vord), nr = as.integer(nr), rmax = as.double(rmax), ans = as.double(numeric(nM * nr)), PACKAGE="spatstat.explore") loccumprod <- matrix(z$ans, nrow=nr, ncol=nM) # border correction ok <- outer(r, bM, "<=") denom <- .rowSums(ok, nr, nM) loccumprod[!ok] <- 0 numer <- .rowSums(loccumprod, nr, nM) # pack up Fdf <- data.frame(r=r, theo = 1 - exp(- lmin * pi * r^2)) desc <- c("distance argument r", "theoretical Poisson %s") theo.denom <- rep.int(npts, nr) FX <- ratfv(Fdf, NULL, theo.denom, "r", quote(F[inhom](r)), "theo", NULL, c(0,rmax), c("r","{%s[%s]^{pois}}(r)"), desc, fname=c("F", "inhom"), ratio=ratio) FX <- bind.ratfv(FX, data.frame(bord=denom-numer), denom, "{hat(%s)[%s]^{bord}}(r)", "border estimate of %s", "bord", ratio=ratio) ## wrap up formula(FX) <- . ~ r fvnames(FX, ".") <- c("bord", "theo") unitname(FX) <- unitname(X) if(ratio) FX <- conform.ratfv(FX) ## tack on additional information if(danger) attr(FX, "dangerous") <- dangerous if(savelambda) { attr(FX, "lambda") <- lambdaX attr(FX, "lmin") <- lmin } ## arguments to be used in envelope, etc attr(FX, "conserve") <- list(checkspacing=FALSE) return(FX) } Jinhom <- function(X, lambda=NULL, lmin=NULL, ..., sigma=NULL, varcov=NULL, r=NULL, breaks=NULL, ratio=FALSE, update = TRUE, warn.bias=TRUE, savelambda=FALSE) { ## compute inhomogeneous G (including determination of r and lmin) GX <- Ginhom(X, lambda=lambda, lmin=lmin, ..., sigma=sigma, varcov=varcov, r=r, breaks=breaks, ratio=FALSE, update=update, warn.bias=warn.bias, savelambda=TRUE) ## extract auxiliary values to be used for Finhom r <- GX$r lmin <- attr(GX, "lmin") lambdaX <- attr(GX, "lambda") ## compute inhomogeneous J using previously-determined values FX <- Finhom(X, lambda=lambdaX, lmin=lmin, ..., sigma=sigma, varcov=varcov, r=r, ratio=FALSE, update=update, warn.bias=FALSE, savelambda=FALSE) ## evaluate inhomogeneous J function if(!ratio) { JX <- eval.fv((1-GX)/(1-FX)) } else { num <- eval.fv(1 - GX) den <- eval.fv(1 - FX) JX <- eval.fv(num/den) JX <- rat(JX, num, den) } ## relabel the fv object JX <- rebadge.fv(JX, quote(J[inhom](r)), c("J","inhom"), names(JX), new.labl=attr(GX, "labl")) ## tack on extra info attr(JX, "G") <- GX attr(JX, "F") <- FX attr(JX, "dangerous") <- attr(GX, "dangerous") attr(JX, "conserve") <- append(attr(GX, "conserve"), attr(FX, "conserve")) if(savelambda) { attr(JX, "lmin") <- lmin attr(JX, "lambda") <- lambdaX } return(JX) } spatstat.explore/R/bw.pplHeat.R0000644000176200001440000000720014611073311016116 0ustar liggesusers#' #' bw.pplHeat.R #' #' Bandwidth selection for densityHeat.ppp #' by point process likelihood cross-validation #' #' Copyright (c) 2020 Adrian Baddeley, Tilman Davies and Suman Rakshit #' GNU Public Licence >= 2.0 bw.pplHeat <- function(X, ..., srange=NULL, ns=16, sigma=NULL, leaveoneout=TRUE, verbose=TRUE) { #' compute intensity estimates b <- HeatEstimates.ppp(X, ..., srange=srange, ns=ns, sigma=sigma, leaveoneout=leaveoneout, verbose=verbose) lambda <- b$lambda h <- b$h hname <- b$hname #' compute likelihood cross-validation criterion CV <- rowSums(log(lambda)) iopt <- which.max(CV) result <- bw.optim(CV, h, iopt, criterion="Likelihood cross-validation", hname=hname, unitname=unitname(X)) return(result) } HeatEstimates.ppp <- function(X, ..., srange=NULL, ns=16, sigma=NULL, leaveoneout=FALSE, verbose=TRUE) { stopifnot(is.ppp(X)) nX <- 
npoints(X) ## trap a common error if(length(argh <- list(...)) && (is.null(nama <- names(argh)) || !nzchar(nama[[1L]])) && is.numeric(a <- argh[[1L]]) && length(a) == 1L) stop("Use argument 'sigma' to specify the maximum bandwidth!", call.=FALSE) ## determine candidate bandwidths if(is.numeric(sigma) && length(sigma)) { ## sigma is a vector of candidate bandwidths, or a maximum bandwidth sigMax <- max(sigma) fractions <- if(length(sigma) > 1) sigma/sigMax else geomseq(from=0.05, to=1, length.out=ns) } else if(is.im(sigma)) { ## sigma is an image giving the spatially-varying maximum bandwidth sigMax <- sigma fractions <- seq_len(ns)/ns } else if(is.null(sigma)) { #' make a sequence of candidate bandwidths if(!is.null(srange)) { check.range(srange) } else { nnd <- nndist(X) srange <- c(min(nnd[nnd > 0]), diameter(as.owin(X))/2) } sigMax <- srange[2] sigmavalues <- geomseq(from=srange[1L], to=srange[2L], length.out=ns) fractions <- sigmavalues/sigMax } else stop("Format of sigma is not understood") ## set up transition matrix and initial state a <- densityHeat.ppp(X, sigMax, ..., internal=list(setuponly=TRUE)) Y <- a$Y # initial state image u <- a$u # initial state vector (dropping NA) Xpos <- a$Xpos # location of each data point, index in 'u' A <- a$A # transition matrix, operates on 'u; Nstep <- a$Nstep # total number of iterations ## map desired sigma values to iteration numbers nits <- pmax(1L, pmin(Nstep, round(Nstep * fractions^2))) nits <- diff(c(0L,nits)) reciprocalpixelarea <- with(Y, 1/(xstep * ystep)) ## compute .... lambda <- matrix(nrow=ns, ncol=nX) if(!leaveoneout) { ## usual estimates for(k in seq_len(ns)) { for(l in seq_len(nits[k])) u <- u %*% A lambda[k, ] <- u[Xpos] } } else { ## compute leave-one-out estimates if(verbose) { cat("Processing", nX, "points ... ") pstate <- list() } for(i in seq_len(nX)) { ## initial state = X[-i] ui <- u Xposi <- Xpos[i] ui[Xposi] <- ui[Xposi] - reciprocalpixelarea ## run iterations, pausing at each sigma value for(k in seq_len(ns)) { for(l in seq_len(nits[k])) ui <- ui %*% A lambda[k, i] <- ui[Xposi] } if(verbose) pstate <- progressreport(i, nX, state=pstate) } if(verbose) cat("Done.\n") } if(!is.im(sigma)) { h <- sigMax * fractions hname <- "sigma" } else { h <- fractions hname <- "fract" } return(list(lambda=lambda, h=h, hname=hname)) } spatstat.explore/R/bw.diggle.R0000644000176200001440000000535114611073307015766 0ustar liggesusers## ## bw.diggle.R ## ## bandwidth selection rule bw.diggle (for density.ppp) ## ## $Revision: 1.9 $ $Date: 2022/05/21 08:53:38 $ ## bw.diggle <- local({ #' integrand phi <- function(x,h) { if(h <= 0) return(numeric(length(x))) y <- pmax.int(0, pmin.int(1, x/(2 * h))) 4 * pi * h^2 * (acos(y) - y * sqrt(1 - y^2)) } #' secret option for debugging mf <- function(..., method=c("C", "interpreted")) { match.arg(method) } bw.diggle <- function(X, ..., correction="good", hmax=NULL, nr=512, warn=TRUE) { stopifnot(is.ppp(X)) method <- mf(...) 
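    ## Berman-Diggle cross-validation: estimate the K function of X on a
    ## fine grid of r values, form the criterion M(sigma) from K, and return
    ## the bandwidth sigma that minimises M.
    ## Illustrative use (not run), assuming the 'cells' dataset is available:
    ##   sigma <- bw.diggle(cells)
    ##   plot(density(cells, sigma))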
W <- Window(X) lambda <- npoints(X)/area(W) rmax <- if(!is.null(hmax)) (4 * hmax) else rmax.rule("K", W, lambda) r <- seq(0, rmax, length=nr) K <- Kest(X, r=r, correction=correction) yname <- fvnames(K, ".y") K <- K[, c("r", yname)] ## check that K values can be passed to C code if(any(bad <- !is.finite(K[[yname]]))) { ## throw out bad values lastgood <- min(which(bad)) - 1L if(lastgood < 2L) stop("K function yields too many NA/NaN values") K <- K[1:lastgood, ] } rvals <- K$r ## evaluation of M(r) requires K(2r) rmax2 <- max(rvals)/2 if(!is.null(alim <- attr(K, "alim"))) rmax2 <- min(alim[2L], rmax2) ok <- (rvals <= rmax2) switch(method, interpreted = { rvals <- rvals[ok] nr <- length(rvals) J <- numeric(nr) for(i in 1:nr) J[i] <- stieltjes(phi, K, h=rvals[i])[[yname]]/(2 * pi) }, C = { nr <- length(rvals) nrmax <- sum(ok) dK <- diff(K[[yname]]) ndK <- length(dK) z <- .C(SE_digberJ, r=as.double(rvals), dK=as.double(dK), nr=as.integer(nr), nrmax=as.integer(nrmax), ndK=as.integer(ndK), J=as.double(numeric(nrmax)), PACKAGE="spatstat.explore") J <- z$J rvals <- rvals[ok] }) pir2 <- pi * rvals^2 M <- (1/lambda - 2 * K[[yname]][ok])/pir2 + J/pir2^2 ## This calculation was for the uniform kernel on B(0,h) ## Convert to standard deviation of (one-dimensional marginal) kernel sigma <- rvals/2 result <- bw.optim(M, sigma, optimum="min", creator="bw.diggle", criterion="Berman-Diggle Cross-Validation", J=J, lambda=lambda, warnextreme=warn, hargnames="hmax", unitname=unitname(X)) return(result) } bw.diggle }) spatstat.explore/R/smoothfv.R0000644000176200001440000000323014611073310015755 0ustar liggesusers# # smoothfv.R # # $Revision: 1.16 $ $Date: 2022/01/04 05:30:06 $ # # smooth.fv <- function(x, which="*", ..., # method=c("smooth.spline", "loess"), # xinterval=NULL) { # .Deprecated("Smooth.fv", package="spatstat", # msg="smooth.fv is deprecated: use the generic Smooth with a capital S") # Smooth(x, which=which, ..., method=method, xinterval=xinterval) # } Smooth.fv <- function(X, which="*", ..., method=c("smooth.spline", "loess"), xinterval=NULL) { x <- X stopifnot(is.character(which)) method <- match.arg(method) if(!is.null(xinterval)) check.range(xinterval) if(length(which) == 1 && which %in% .Spatstat.FvAbbrev) { if(which == ".x") stop("Cannot smooth the function argument") which <- fvnames(x, which) } if(any(nbg <- !(which %in% names(x)))) stop(paste("Unrecognised column", ngettext(sum(nbg), "name", "names"), commasep(sQuote(which[nbg])), "in argument", sQuote("which"))) xx <- x[[fvnames(x, ".x")]] # process each column of function values for(ynam in which) { yy <- x[[ynam]] ok <- is.finite(yy) if(!is.null(xinterval)) ok <- ok & inside.range(xx, xinterval) switch(method, smooth.spline = { ss <- smooth.spline(xx[ok], yy[ok], ...) yhat <- predict(ss, xx[ok])$y }, loess = { df <- data.frame(x=xx[ok], y=yy[ok]) lo <- loess(y ~ x, df, ...) 
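                 ## evaluate the fitted loess curve back at the original x values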
yhat <- predict(lo, df[,"x", drop=FALSE]) }) yy[ok] <- yhat x[[ynam]] <- yy } return(x) } spatstat.explore/R/eval.fv.R0000644000176200001440000002340414611073310015456 0ustar liggesusers# # eval.fv.R # # # eval.fv() Evaluate expressions involving fv objects # # compatible.fv() Check whether two fv objects are compatible # # $Revision: 1.42 $ $Date: 2022/01/04 05:30:06 $ # eval.fv <- local({ # main function eval.fv <- function(expr, envir, dotonly=TRUE, equiv=NULL, relabel=TRUE) { # convert syntactic expression to 'expression' object e <- as.expression(substitute(expr)) # convert syntactic expression to call elang <- substitute(expr) # find names of all variables in the expression varnames <- all.vars(e) if(length(varnames) == 0) stop("No variables in this expression") # get the actual variables if(missing(envir)) { envir <- parent.frame() } else if(is.list(envir)) { envir <- list2env(envir, parent=parent.frame()) } vars <- lapply(as.list(varnames), get, envir=envir) names(vars) <- varnames # find out which ones are fv objects fvs <- unlist(lapply(vars, is.fv)) nfuns <- sum(fvs) if(nfuns == 0) stop("No fv objects in this expression") # extract them funs <- vars[fvs] # restrict to columns identified by 'dotnames' if(dotonly) funs <- lapply(funs, restrict.to.dot) # map names if instructed if(!is.null(equiv)) funs <- lapply(funs, mapnames, map=equiv) # test whether the fv objects are compatible if(nfuns > 1L && !(do.call(compatible, unname(funs)))) { warning(paste(if(nfuns > 2) "some of" else NULL, "the functions", commasep(sQuote(names(funs))), "were not compatible: enforcing compatibility")) funs <- do.call(harmonise, append(funs, list(strict=TRUE))) } # copy first object as template result <- funs[[1L]] ## ensure 'conservation' info is retained conserve <- unname(lapply(funs, attr, which="conserve")) if(any(present <- !sapply(conserve, is.null))) { conserve <- do.call(resolve.defaults, conserve[present]) attr(result, "conserve") <- conserve } ## remove potential ratio info class(result) <- setdiff(class(result), "rat") attr(result, "numerator") <- attr(result, "denominator") <- NULL labl <- attr(result, "labl") origdotnames <- fvnames(result, ".") origshadenames <- fvnames(result, ".s") # determine which function estimates are supplied argname <- fvnames(result, ".x") nam <- names(result) ynames <- nam[nam != argname] # for each function estimate, evaluate expression for(yn in ynames) { # extract corresponding estimates from each fv object funvalues <- lapply(funs, "[[", i=yn) # insert into list of argument values vars[fvs] <- funvalues # evaluate result[[yn]] <- eval(e, vars, enclos=envir) } if(!relabel) return(result) # determine mathematical labels. # 'yexp' determines y axis label # 'ylab' determines y label in printing and description # 'fname' is sprintf-ed into 'labl' for legend yexps <- lapply(funs, attr, which="yexp") ylabs <- lapply(funs, attr, which="ylab") fnames <- lapply(funs, getfname) # Repair 'fname' attributes if blank blank <- unlist(lapply(fnames, isblank)) if(any(blank)) { # Set function names to be object names as used in the expression for(i in which(blank)) attr(funs[[i]], "fname") <- fnames[[i]] <- names(funs)[i] } # Remove duplicated names # Typically occurs when combining several K functions, etc. 
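  # (for example, eval.fv(K1 - K2) where K1 and K2 were both produced by Kest
  #  and therefore share the function name "K")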
# Tweak fv objects so their function names are their object names # as used in the expression if(anyDuplicated(fnames)) { newfnames <- names(funs) for(i in 1:nfuns) funs[[i]] <- rebadge.fv(funs[[i]], new.fname=newfnames[i]) fnames <- newfnames } if(anyDuplicated(ylabs)) { flatnames <- lapply(funs, flatfname) for(i in 1:nfuns) { new.ylab <- substitute(f(r), list(f=flatnames[[i]])) funs[[i]] <- rebadge.fv(funs[[i]], new.ylab=new.ylab) } ylabs <- lapply(funs, attr, which="ylab") } if(anyDuplicated(yexps)) { newfnames <- names(funs) for(i in 1:nfuns) { new.yexp <- substitute(f(r), list(f=as.name(newfnames[i]))) funs[[i]] <- rebadge.fv(funs[[i]], new.yexp=new.yexp) } yexps <- lapply(funs, attr, which="yexp") } # now compute y axis labels for the result attr(result, "yexp") <- eval(substitute(substitute(e, yexps), list(e=elang))) attr(result, "ylab") <- eval(substitute(substitute(e, ylabs), list(e=elang))) # compute fname equivalent to expression if(nfuns > 1L) { # take original expression the.fname <- paren(flatten(deparse(elang))) } else if(nzchar(oldname <- flatfname(funs[[1L]]))) { # replace object name in expression by its function name namemap <- list(as.name(oldname)) names(namemap) <- names(funs)[1L] the.fname <- deparse(eval(substitute(substitute(e, namemap), list(e=elang)))) } else the.fname <- names(funs)[1L] attr(result, "fname") <- the.fname # now compute the [modified] y labels labelmaps <- lapply(funs, fvlabelmap, dot=FALSE) for(yn in ynames) { # labels for corresponding columns of each argument funlabels <- lapply(labelmaps, "[[", i=yn) # form expression involving these columns labl[match(yn, names(result))] <- flatten(deparse(eval(substitute(substitute(e, f), list(e=elang, f=funlabels))))) } attr(result, "labl") <- labl # copy dotnames and shade names from template fvnames(result, ".") <- origdotnames[origdotnames %in% names(result)] if(!is.null(origshadenames) && all(origshadenames %in% names(result))) fvnames(result, ".s") <- origshadenames return(result) } # helper functions restrict.to.dot <- function(z) { argu <- fvnames(z, ".x") dotn <- fvnames(z, ".") shadn <- fvnames(z, ".s") ok <- colnames(z) %in% unique(c(argu, dotn, shadn)) return(z[, ok]) } getfname <- function(x) { if(!is.null(y <- attr(x, "fname"))) y else "" } flatten <- function(x) { paste(x, collapse=" ") } mapnames <- function(x, map=NULL) { colnames(x) <- mapstrings(colnames(x), map=map) fvnames(x, ".y") <- mapstrings(fvnames(x, ".y"), map=map) return(x) } isblank <- function(z) { !any(nzchar(z)) } eval.fv }) compatible.fv <- local({ approx.equal <- function(x, y) { max(abs(x-y)) <= .Machine$double.eps } compatible.fv <- function(A, B, ..., samenames=TRUE) { verifyclass(A, "fv") if(missing(B)) { answer <- if(length(...) == 0) TRUE else compatible(A, ...) return(answer) } verifyclass(B, "fv") ## is the function argument the same? samearg <- (fvnames(A, ".x") == fvnames(B, ".x")) if(!samearg) return(FALSE) if(samenames) { ## are all columns the same, and in the same order? namesmatch <- isTRUE(all.equal(names(A),names(B))) && samearg && (fvnames(A, ".y") == fvnames(B, ".y")) if(!namesmatch) return(FALSE) } ## are 'r' values the same ? 
rA <- with(A, .x) rB <- with(B, .x) rmatch <- (length(rA) == length(rB)) && approx.equal(rA, rB) if(!rmatch) return(FALSE) ## A and B are compatible if(length(list(...)) == 0) return(TRUE) ## recursion return(compatible.fv(B, ...)) } compatible.fv }) # force a list of functions to be compatible with regard to 'x' values harmonize.fv <- harmonise.fv <- local({ harmonise.fv <- function(..., strict=FALSE) { argh <- list(...) n <- length(argh) if(n == 0) return(argh) if(n == 1) { a1 <- argh[[1L]] if(is.fv(a1)) return(argh) if(is.list(a1) && all(sapply(a1, is.fv))) { argh <- a1 n <- length(argh) } } isfv <- sapply(argh, is.fv) if(!all(isfv)) stop("All arguments must be fv objects") if(n == 1) return(argh[[1L]]) ## determine range of argument ranges <- lapply(argh, argumentrange) xrange <- c(max(unlist(lapply(ranges, min))), min(unlist(lapply(ranges, max)))) if(diff(xrange) < 0) stop("No overlap in ranges of argument") if(strict) { ## find common column names and keep these keepnames <- Reduce(intersect, lapply(argh, colnames)) argh <- lapply(argh, "[", j=keepnames) } ## determine finest resolution xsteps <- sapply(argh, argumentstep) finest <- which.min(xsteps) ## extract argument values xx <- with(argh[[finest]], .x) xx <- xx[xrange[1L] <= xx & xx <= xrange[2L]] xrange <- range(xx) ## convert each fv object to a function funs <- lapply(argh, as.function, value="*") ## evaluate at common argument result <- vector(mode="list", length=n) for(i in 1:n) { ai <- argh[[i]] fi <- funs[[i]] xxval <- list(xx=xx) names(xxval) <- fvnames(ai, ".x") starnames <- fvnames(ai, "*") ## ensure they are given in same order as current columns starnames <- colnames(ai)[colnames(ai) %in% starnames] yyval <- lapply(starnames, function(v,xx,fi) fi(xx, v), xx=xx, fi=fi) names(yyval) <- starnames ri <- do.call(data.frame, append(xxval, yyval)) fva <- .Spatstat.FvAttrib attributes(ri)[fva] <- attributes(ai)[fva] class(ri) <- c("fv", class(ri)) attr(ri, "alim") <- intersect.ranges(attr(ai, "alim"), xrange) result[[i]] <- ri } names(result) <- names(argh) return(result) } argumentrange <- function(f) { range(with(f, .x)) } argumentstep <- function(f) { mean(diff(with(f, .x))) } harmonise.fv }) spatstat.explore/R/clusterset.R0000644000176200001440000000432314611073310016311 0ustar liggesusers# # clusterset.R # # Allard-Fraley estimator of cluster region # # $Revision: 1.13 $ $Date: 2022/01/04 05:30:06 $ # clusterset <- function(X, what=c("marks", "domain"), ..., verbose=TRUE, fast=FALSE, exact=!fast) { stopifnot(is.ppp(X)) what <- match.arg(what, several.ok=TRUE) if(!missing(exact)) stopifnot(is.logical(exact)) if(fast && exact) stop("fast=TRUE is incompatible with exact=TRUE") # compute duplication exactly as in deldir, or the universe will explode X <- unique(unmark(X), rule="deldir", warn=TRUE) n <- npoints(X) W <- as.owin(X) # discretised Dirichlet tessellation if(verbose) cat("Computing Dirichlet tessellation...") if(fast || !exact) cellid <- as.im(nnfun(X), ...) 
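  # in the approximate case, each pixel of 'cellid' is labelled with the index
  # of its nearest data point, so counting pixels per label approximates the
  # Dirichlet tile areas needed by the Allard-Fraley criterion below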
# compute tile areas if(fast) { a <- table(factor(as.vector(as.matrix(cellid)), levels=1:n)) if(verbose) cat("done.\n") a <- a + 0.5 A <- sum(a) } else { d <- dirichlet(X) if(verbose) cat("done.\n") D <- tiles(d) suppressWarnings(id <- as.integer(names(D))) if(anyNA(id) && ("marks" %in% what)) stop("Unable to map Dirichlet tiles to data points") A <- area(W) a <- unlist(lapply(D, area)) } # determine optimal selection of tiles ntile <- length(a) o <- order(a) b <- cumsum(a[o]) m <- seq_len(ntile) logl <- -n * log(n) + m * log(m/b) + (n-m) * log((n-m)/(A-b)) mopt <- which.max(logl) picked <- o[seq_len(mopt)] ## map tiles to points if(!fast) picked <- id[picked] ## logical vector is.picked <- rep.int(FALSE, n) is.picked[picked] <- TRUE # construct result out <- list(marks=NULL, domain=NULL) if("marks" %in% what) { ## label points yesno <- factor(ifelse(is.picked, "yes", "no"), levels=c("no", "yes")) out$marks <- X %mark% yesno } if("domain" %in% what) { if(verbose) cat("Computing cluster set...") if(exact) { domain <- do.call(union.owin, unname(D[is.picked])) domain <- rebound.owin(domain, as.rectangle(W)) } else { domain <- eval.im(is.picked[cellid]) } out$domain <- domain if(verbose) cat("done.\n") } out <- if(length(what) == 1L) out[[what]] else out return(out) } spatstat.explore/R/clarkevans.R0000644000176200001440000002143014611073310016243 0ustar liggesusers## clarkevans.R ## Clark-Evans statistic and test ## $Revision: 1.21 $ $Date: 2023/10/17 05:13:03 $ clarkevans <- function(X, correction=c("none", "Donnelly", "cdf"), clipregion=NULL) { verifyclass(X, "ppp") W <- X$window # validate correction argument gavecorrection <- !missing(correction) correction <- pickoption("correction", correction, c(none="none", Donnelly="Donnelly", donnelly="Donnelly", guard="guard", cdf="cdf"), multi=TRUE) if(("Donnelly" %in% correction) && (W$type != "rectangle")) { if(gavecorrection) warning("Donnelly correction only available for rectangular windows") correction <- correction[correction != "Donnelly"] } # guard correction applied iff `clipregion' is present isguard <- "guard" %in% correction askguard <- any(isguard) gaveguard <- !is.null(clipregion) if(gaveguard) clipregion <- as.owin(clipregion) if(askguard && !gaveguard) { warning("guard correction not performed; clipregion not specified") correction <- correction[!isguard] } else if(gaveguard && !askguard) correction <- c(correction, "guard") result <- clarkevansCalc(X, correction, clipregion) if(length(result) == 1L) result <- unname(result) return(result) } clarkevans.test <- function(X, ..., correction, clipregion=NULL, alternative=c("two.sided", "less", "greater", "clustered", "regular"), method = c("asymptotic", "MonteCarlo"), nsim=999 ) { Xname <- short.deparse(substitute(X)) miss.nsim <- missing(nsim) method <- match.arg(method) verifyclass(X, "ppp") W <- Window(X) nX <- npoints(X) if(missing(correction) || is.null(correction)) { correction <- switch(method, MonteCarlo = "none", asymptotic = if(is.rectangle(W)) "Donnelly" else "cdf") } else { #' validate SINGLE correction correction <- pickoption("correction", correction, c(none="none", Donnelly="Donnelly", donnelly="Donnelly", guard="guard", cdf="cdf")) } switch(correction, none={ corrblurb <- "No edge correction" }, Donnelly={ if(W$type != "rectangle") stop("Donnelly correction only available for rectangular windows") corrblurb <- "Donnelly correction" }, guard={ if(is.null(clipregion)) stop("clipregion not specified") clipregion <- as.owin(clipregion) corrblurb <- "Guard correction" }, 
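         # the "cdf" correction uses the mean of the estimated
         # nearest-neighbour distance distribution G (see clarkevansCalc below)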
cdf={ corrblurb <- "CDF correction" }) # alternative hypothesis if(missing(alternative) || is.null(alternative)) alternative <- "two.sided" alternative <- pickoption("alternative", alternative, c(two.sided="two.sided", less="less", clustered="less", greater="greater", regular="greater")) altblurb <- switch(alternative, two.sided="two-sided", less="clustered (R < 1)", greater="regular (R > 1)") # compute observed value statistic <- clarkevansCalc(X, correction=correction, clipregion=clipregion, working=TRUE) working <- attr(statistic, "working") # switch(method, asymptotic = { #' use asymptotic standard Normal reference #' get appropriate standard error SE.R <- switch(correction, none = working[["SEnaive"]], guard = working[["SEguard"]], Donnelly = working[["SEkevin"]], cdf = working[["SEcdf"]]) #' standardised test statistic Z <- as.numeric((statistic - 1)/SE.R) p.value <- switch(alternative, less=pnorm(Z), greater=1 - pnorm(Z), two.sided= 2*(1-pnorm(abs(Z)))) pvblurb <- "Z-test" }, MonteCarlo = { #' Monte Carlo p-value sims <- numeric(nsim) for(i in seq_len(nsim)) { Xsim <- runifpoint(nX, win=W) sims[i] <- clarkevansCalc(Xsim, correction=correction, clipregion=clipregion) } p.upper <- (1 + sum(sims >= statistic))/(1.0 + nsim) p.lower <- (1 + sum(sims <= statistic))/(1.0 + nsim) p.value <- switch(alternative, less=p.lower, greater=p.upper, two.sided=min(1, 2*min(p.lower, p.upper))) pvblurb <- paste("Monte Carlo test based on", nsim, "simulations of CSR with fixed n") }) statistic <- as.numeric(statistic) names(statistic) <- "R" out <- list(statistic=statistic, p.value=p.value, alternative=altblurb, method=c("Clark-Evans test", corrblurb, pvblurb), data.name=Xname) class(out) <- "htest" return(out) } clarkevansCalc <- function(X, correction="none", clipregion=NULL, working=FALSE) { # calculations for Clark-Evans index or test W <- Window(X) areaW <- area(W) npts <- npoints(X) intensity <- npts/areaW # R undefined for empty point pattern if(npts == 0) return(NA) # Dobs = observed mean nearest neighbour distance nndistX <- nndist(X) Dobs <- mean(nndistX) # Dpois = Expected mean nearest neighbour distance for Poisson process Dpois <- 1/(2*sqrt(intensity)) ## initialise statistic <- NULL SE.Dobs <- NULL if(working) { work <- list(areaW=areaW, npts=npts, intensity=intensity, Dobs=Dobs, Dpois=Dpois) #' null standard error of Dobs = mean(nndist(X)) SE.Dobs <- sqrt(((4-pi)*areaW)/(4 * pi))/npts # sic } ## start computing results # Naive uncorrected value if("none" %in% correction) { Rnaive <- Dobs/Dpois statistic <- c(statistic, naive=Rnaive) if(working) { #' null standard error of Clark-Evans statistic Rnaive SE.Rnaive <- SE.Dobs / Dpois work <- append(work, list(SEnaive=SE.Rnaive)) } } #' Donnelly edge correction if("Donnelly" %in% correction) { #' Edge corrected mean nearest neighbour distance, Donnelly 1978 if(W$type == "rectangle") { perim <- perimeter(W) Dkevin <- Dpois + (0.0514+0.0412/sqrt(npts))*perim/npts Rkevin <- Dobs/Dkevin if(working) { #' null standard error of adjusted Clark-Evans statistic Rkevin SE.Rkevin <- SE.Dobs / Dkevin work <- append(work, list(perim=perim, Dkevin=Dkevin, SEkevin=SE.Rkevin)) } } else { Rkevin <- NA } statistic <- c(statistic, Donnelly=Rkevin) } # guard area method if("guard" %in% correction && !is.null(clipregion)) { #' use nn distances from points inside `clipregion' ok <- inside.owin(X, , clipregion) Dguard <- mean(nndistX[ok]) Rguard <- Dguard/Dpois statistic <- c(statistic, guard=Rguard) ## additional info if(working) { npts.guard <- sum(ok) areaWclip <- 
area(clipregion) #' null standard error of Dguard = mean(nndist(X[clipregion])) SE.Dguard <- sqrt((4-pi)/(4 * pi * npts.guard * intensity)) #' null standard error of adjusted Clark-Evans statistic Rguard SE.Rguard <- SE.Dguard / Dpois work <- append(work, list(Dguard=Dguard, npts.guard=npts.guard, SEguard=SE.Rguard)) } } if("cdf" %in% correction) { # compute mean of estimated nearest-neighbour distance distribution G G <- Gest(X) numer <- stieltjes(function(x){x}, G)$km denom <- stieltjes(function(x){rep.int(1, length(x))}, G)$km Dcdf <- numer/denom Rcdf <- Dcdf/Dpois statistic <- c(statistic, cdf=Rcdf) if(working) { #' approximate null standard error of Dobs = mean(Gest(X)) SE.Dcdf <- SE.Dobs #' null standard error of Clark-Evans statistic Rcdf SE.Rcdf <- SE.Dcdf/Dpois work <- append(work, list(Dcdf=Dcdf, SEcdf=SE.Rcdf)) } } if(working) attr(statistic, "working") <- work return(statistic) } spatstat.explore/R/nndensity.R0000644000176200001440000000200514611073310016122 0ustar liggesusers# # nndensity.R # # Density estimation based on nn distance # # $Revision: 1.4 $ $Date: 2022/11/03 11:08:33 $ # nndensity <- function(x, ...) { UseMethod("nndensity") } nndensity.ppp <- function(x, k, ..., verbose=TRUE) { if(missing(k) || is.null(k)) { k <- round(sqrt(npoints(x))) if(verbose) cat(paste("k=", k, "\n")) } else { check.1.integer(k) if(verbose && k == 1) warning("k=1 will produce strange results") } # distance to k-th nearest neighbour D <- nnmap(x, k=k, what="dist", ...) # area searched A <- eval.im(pi * D^2) # distance to boundary B <- bdist.pixels(as.owin(D)) # handle edge effects edge <- solutionset(B < D) # centres of all pixels where edge effect occurs xy <- rasterxy.mask(edge, drop=TRUE) # corresponding values of distance rr <- D[edge, drop=TRUE] # compute actual search area X <- as.ppp(xy, W=as.owin(x), check=FALSE) A[edge] <- discpartarea(X, matrix(rr, ncol=1)) # finally compute intensity estimate L <- eval.im(k/A) return(L) } spatstat.explore/R/spatialcdf.R0000644000176200001440000000441114611073310016224 0ustar liggesusers## ## spatialcdf.R ## ## $Revision: 1.10 $ $Date: 2023/04/06 00:14:21 $ ## spatialcdf <- function(Z, weights=NULL, normalise=FALSE, ..., W=NULL, Zname=NULL) { Zdefaultname <- singlestring(short.deparse(substitute(Z))) if(is.character(Z) && length(Z) == 1) { if(is.null(Zname)) Zname <- Z switch(Zname, x={ Z <- function(x,y) { x } }, y={ Z <- function(x,y) { y } }, stop("Unrecognised covariate name") ) } if(is.null(Zname)) Zname <- Zdefaultname ## if(inherits(weights, c("ppm", "kppm", "dppm"))) { model <- weights if(!requireNamespace("spatstat.model")) stop("The package spatstat.model is required", call.=FALSE) df <- spatstat.model::spatialCovariateUnderModel(model, Z) G <- with(df, ewcdf(Z, wt, normalise=normalise)) wtname <- if(normalise) "fraction of points" else "number of points" } else { if(is.null(W)) W <- as.owin(weights, fatal=FALSE) if(is.null(W)) W <- as.owin(Z, fatal=FALSE) if(is.null(W)) stop("No information specifying the spatial window") M <- as.mask(W, ...) loc <- as.ppp(rasterxy.mask(M, drop=TRUE), W=W, check=FALSE) pixelarea <- with(unclass(M), xstep * ystep) if(is.null(weights)) { Zvalues <- evaluateCovariateAtPoints(Z, loc, ...) G <- ewcdf(Zvalues, normalise=normalise, adjust=pixelarea) wtname <- if(normalise) "fraction of area" else "area" } else { Zvalues <- evaluateCovariateAtPoints(Z, loc, ...) wtvalues <- evaluateCovariateAtPoints(weights, loc, ...) 
G <- ewcdf(Zvalues, wtvalues, normalise=normalise, adjust=pixelarea) wtname <- if(normalise) "fraction of weight" else "weight" } } class(G) <- c("spatialcdf", class(G)) attr(G, "call") <- sys.call() attr(G, "Zname") <- Zname attr(G, "ylab") <- paste("Cumulative", wtname) return(G) } plot.spatialcdf <- function(x, ..., xlab, ylab, do.points=FALSE) { if(missing(xlab) || is.null(xlab)) xlab <- attr(x, "Zname") if(missing(ylab) || is.null(ylab)) ylab <- attr(x, "ylab") if(inherits(x, "ecdf")) { plot.ecdf(x, ..., xlab=xlab, ylab=ylab, do.points=do.points) } else { plot.stepfun(x, ..., xlab=xlab, ylab=ylab, do.points=do.points) } } spatstat.explore/R/rose.R0000644000176200001440000002447114611073310015072 0ustar liggesusers#' #' rose.R #' #' Rose diagrams #' #' $Revision: 1.12 $ $Date: 2022/05/23 02:33:06 $ #' rose <- function(x, ...) UseMethod("rose") rose.default <- local({ rose.default <- function(x, breaks = NULL, ..., weights=NULL, nclass=NULL, unit=c("degree", "radian"), start=0, clockwise=FALSE, main) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) stopifnot(is.numeric(x)) if(!is.null(weights)) check.nvector(weights, length(x), things="observations", vname="weights") #' determine units missu <- missing(unit) unit <- match.arg(unit) unit <- validate.angles(x, unit, missu) FullCircle <- switch(unit, degree = 360, radian = 2*pi) #' reduce to [0, 2pi] x <- x %% FullCircle #' determine breakpoints strictly inside full circle breaks <- makebreaks(x, c(0, FullCircle), breaks, nclass) #' histogram without weights h <- do.call.matched(hist.default, list(x=x, breaks=breaks, ..., plot=FALSE), skipargs=graphicsAargh, sieve=TRUE) result <- h$result otherargs <- h$otherargs #' redo weights, if given if(!is.null(weights)) { wh <- whist(x=x, breaks=breaks, weights=weights) result$count <- wh result$density <- wh/diff(breaks) } # do.call(rose.histogram, c(list(x=result, main=main, unit=unit, start=start, clockwise=clockwise), otherargs)) } graphicsAargh <- c("density", "angle", "col", "border", "xlim", "ylim", "xlab", "ylab", "axes") makebreaks <- function(x, r, breaks=NULL, nclass=NULL) { use.br <- !is.null(breaks) if (use.br) { if (!is.null(nclass)) warning("'nclass' not used when 'breaks' is specified") } else if (!is.null(nclass) && length(nclass) == 1L) { breaks <- nclass } else breaks <- "Sturges" use.br <- use.br && (nB <- length(breaks)) > 1L if (use.br) breaks <- sort(breaks) else { if (is.character(breaks)) { breaks <- match.arg(tolower(breaks), c("sturges", "fd", "freedman-diaconis", "scott")) breaks <- switch(breaks, sturges = nclass.Sturges(x), `freedman-diaconis` = , fd = nclass.FD(x), scott = nclass.scott(x), stop("unknown 'breaks' algorithm")) } else if (is.function(breaks)) { breaks <- breaks(x) } if (length(breaks) == 1) { if (!is.numeric(breaks) || !is.finite(breaks) || breaks < 1L) stop("invalid number of 'breaks'") breaks <- seq(r[1], r[2], length.out=breaks) } else { if (!is.numeric(breaks) || length(breaks) <= 1) stop(gettextf("Invalid breakpoints produced by 'breaks(x)': %s", format(breaks)), domain = NA) breaks <- sort(breaks) } } return(breaks) } rose.default }) rose.histogram <- function(x, ..., unit=c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) #' determine units missu <- missing(unit) unit <- match.arg(unit) #' validate bks <- x$breaks unit <- validate.angles(bks, unit, missu) # FullCircle <- switch(unit, degree = 360, radian = 2*pi) 
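  #' each histogram class is drawn as a circular sector whose radius equals
  #' the estimated density of angles in that class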
#' get sector sizes y <- x$density ymax <- max(y) #' draw disc insideclearance <- 0.1 outsidespace <- if(!is.null(at) && length(at) == 0) 0 else if(identical(labels, FALSE)) 0.1 else 0.25 R <- (1+insideclearance) * ymax DD <- disc(R) Rout <- (1 + outsidespace) * R disco <- disc(Rout) dont.complain.about(DD, disco) result <- do.call.matched(plot.owin, resolve.defaults(list(x=quote(disco), main=main, type="n"), list(...))) do.call.matched(plot.owin, resolve.defaults(list(x=quote(DD), hatch=FALSE, add=TRUE), list(...)), extrargs=graphicsPars("owin"), skipargs="col") if(do.plot) { #' draw sectors ang <- ang2rad(bks, unit=unit, start=start, clockwise=clockwise) eps <- min(diff(ang), pi/128)/2 for(i in seq_along(y)) { aa <- seq(ang[i], ang[i+1], by=eps) aa[length(aa)] <- ang[i+1] yi <- y[i] xx <- c(0, yi * cos(aa), 0) yy <- c(0, yi * sin(aa), 0) do.call.matched(polygon, list(x=xx, y=yy, ...)) } #' add tick marks circticks(R, at=at, unit=unit, start=start, clockwise=clockwise, labels=labels) } #' return(invisible(result)) } rose.density <- function(x, ..., unit=c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) ang <- x$x rad <- x$y missu <- missing(unit) unit <- match.arg(unit) unit <- validate.angles(ang, unit, missu) #' result <- roseContinuous(ang, rad, unit, ..., start=start, clockwise=clockwise, main=main, labels=labels, at=at, do.plot=do.plot) return(invisible(result)) } rose.fv <- function(x, ..., unit=c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) ang <- with(x, .x) rad <- with(x, .y) missu <- missing(unit) unit <- match.arg(unit) unit <- validate.angles(ang, unit, missu) #' result <- roseContinuous(ang, rad, unit, ..., start=start, clockwise=clockwise, main=main, labels=labels, at=at, do.plot=do.plot) return(invisible(result)) } roseContinuous <- function(ang, rad, unit, ..., start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot=TRUE) { rmax <- max(rad) #' draw disc insideclearance <- 0.1 outsidespace <- if(!is.null(at) && length(at) == 0) 0 else if(identical(labels, FALSE)) 0.1 else 0.25 R <- (1+insideclearance) * rmax DD <- disc(R) Rout <- (1 + outsidespace) * R disco <- disc(Rout) dont.complain.about(DD, disco) result <- do.call.matched(plot.owin, resolve.defaults(list(x=quote(disco), main=main, type="n"), list(...))) do.call.matched(plot.owin, resolve.defaults(list(x=quote(DD), add=TRUE, hatch=FALSE), list(...)), extrargs=graphicsPars("owin"), skipargs="col") #' draw plot if(do.plot) { ang <- ang2rad(ang, unit=unit, start=start, clockwise=clockwise) xx <- rad * cos(ang) yy <- rad * sin(ang) do.call.matched(polygon, list(x=xx, y=yy, ...), extrargs="lwd") circticks(R, at=at, unit=unit, start=start, clockwise=clockwise, labels=labels) } return(result) } ang2rad <- local({ compasspoints <- c(E=0,N=90,W=180,S=270) ang2rad <- function(ang, unit=c("degree", "radian"), start=0, clockwise=FALSE) { unit <- match.arg(unit) clocksign <- if(clockwise) -1 else 1 stopifnot(length(start) == 1) if(is.character(start)) { if(is.na(match(toupper(start), names(compasspoints)))) stop(paste("Unrecognised compass point", sQuote(start)), call.=FALSE) startdegrees <- compasspoints[[start]] start <- switch(unit, degree = startdegrees, radian = pi * (startdegrees/180)) # start is measured anticlockwise ang <- start + clocksign * ang } else { stopifnot(is.numeric(start)) # 
start is measured according to value of 'clockwise' ang <- clocksign * (start + ang) } rad <- switch(unit, degree = pi * (ang/180), radian = ang) return(rad) } ang2rad }) circticks <- function(R, at=NULL, unit=c("degree", "radian"), start=0, clockwise=FALSE, labels=TRUE) { unit <- match.arg(unit) FullCircle <- switch(unit, degree = 360, radian = 2*pi) if(is.null(at)) { at <- FullCircle * (0:23)/24 major <- ((0:23) %% 6 == 0) } else { if(length(at) == 0) return(invisible(NULL)) nat <- (at/FullCircle) * 4 major <- abs(nat - round(nat)) < 0.01 } atradians <- ang2rad(ang=at, unit=unit, start=start, clockwise=clockwise) tx <- R * cos(atradians) ty <- R * sin(atradians) expan <- ifelse(major, 1.1, 1.05) segments(tx, ty, expan * tx, expan * ty, lwd=major+1) if(!identical(labels, FALSE)) { if(identical(labels, TRUE)) { labels <- switch(unit, degree=paste(round(at)), radian=parse(text= simplenumber(at/pi, "pi", "*", 1e-3))) } else stopifnot(is.vector(labels) && length(labels) == length(at)) big <- expan + 0.1 text(big * tx, big * ty, labels=labels) } invisible(NULL) } validate.angles <- function(angles, unit=c("degree", "radian"), guess=TRUE) { #' validate width <- diff(range(angles)) if(missing(unit) && guess && width <= 6.2832) { warning("Very small range of angles: treating them as radian") unit <- "radian" } else unit <- match.arg(unit) FullCircle <- switch(unit, degree = 360, radian = 2*pi) if(width > 1.002 * FullCircle) stop("Range of angles exceeds a full circle") return(unit) } spatstat.explore/R/thresholding.R0000644000176200001440000000776114611073311016620 0ustar liggesusers#' #' Selection of threshold #' #' Copyright (c) 2020 Adrian Baddeley, Warick Brown, Robin K. Milne, #' Gopalan Nair, Suman Rakshit, Tom Lawrence, Aloke Phatak, Shih Ching Fu #' #' GNU Public Licence >= 2 #' #' $Revision: 1.5 $ $Date: 2024/01/29 08:00:06 $ #' #' #' threshold selection #' inputs: #' X deposit locations #' Z covariate thresholdSelect <- function(X, Z, method=c("Y", "LL", "AR", "t", "C"), Zname) { if(!is.ppp(X)) stop("X should be a point pattern (class ppp)") if(missing(Zname)) Zname <- short.deparse(substitute(Z)) method <- match.arg(method) a <- spatialCovariateEvidence(X, Z, jitter=FALSE)$values FF <- ecdf(a$ZX) GG <- ecdf(a$Zvalues) n <- npoints(X) A <- area(Window(a$Zimage)) zrange <- range(range(a$ZX), range(a$Zimage)) zz <- seq(zrange[1], zrange[2], length.out=1028) nz <- n * (pz <- FF(zz)) Az <- A * (sz <- GG(zz)) Cz <- log((nz/Az)/((n-nz)/(A-Az))) yy <- switch(method, C = Cz, t = Cz/sqrt(1/nz + 1/(n-nz)), LL = { n * log(nz/Az) - (n-nz) * Cz - n }, AR = { sqrt(sz * (1-sz)) * (nz/sz - (n-nz)/(1-sz)) }, Y = { pz - sz }) yy[!is.finite(yy)] <- -Inf critname <- switch(method, C = "WofE contrast", t = "studentised contrast", LL = "profile log likelihood", AR = "Akman-Raftery criterion", Y = "Youden criterion") bw.optim(yy, zz, optimum="max", cvname=method, hname=Zname, criterion=critname, unitname=if(inherits(Z, "distfun")) unitname(X) else NULL, hword="threshold") } #' confidence interval for threshold thresholdCI <- local({ thresholdCI <- function(X, Z, confidence=0.95, nsim=1000, parametric=FALSE) { #' bootstrap confidence interval for Youden estimate only. 
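    #' Resample covariate values at the data points (from the empirical cdf,
    #' or from the fitted two-piece model when parametric=TRUE), recompute the
    #' Youden-optimal threshold for each resample, and take quantiles of the
    #' resampled thresholds and quantile positions to form the interval.
    #' Illustrative call (not run): thresholdCI(X, Z, confidence=0.95, nsim=1000)
    #' where X is a point pattern and Z a spatial covariate.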
if(!is.ppp(X)) stop("X should be a point pattern (class ppp)") a <- spatialCovariateEvidence(X, Z, jitter=FALSE)$values FF <- ecdf(a$ZX) GG <- ecdf(a$Zvalues) est <- Youden(FF,GG) b <- simthresh(FF, GG, npoints(X), nsim, parametric) zCI <- quantCI(b$z, est[["z"]], confidence=confidence) sCI <- quantCI(b$s, est[["s"]], confidence=confidence) rbind(z=zCI, s=sCI) } #' Underlying code based on cumulative distribution functions #' inputs: #' F = ecdf of covariate values for data points #' G = ecdf of covariate values for study region Youden <- function(F, G) { zz <- get("x", envir=environment(F)) iopt <- which.max(F(zz) - G(zz)) zopt <- zz[iopt] sopt <- G(zopt) return(c(z=zopt, s=sopt)) } Fpredicted <- function(F, G, zest) { if(missing(zest)) zest <- Youden(F,G)[["z"]] plow <- F(zest) glow <- G(zest) #' mixture of unif[0, glow] and unif[glow, 1] with weights plow, 1-plow zz <- get("x", envir=environment(G)) pp <- get("y", envir=environment(G)) qq <- ifelse(pp < glow, plow*(pp/glow), plow + (1-plow)*(pp-glow)/(1-glow)) FF <- approxfun(zz, qq, rule=2) return(FF) } inversefunction <- function(F) { zz <- get("x", envir=environment(F)) pz <- get("y", envir=environment(F)) Finv <- approxfun(pz, zz, rule=2) return(Finv) } simthresh <- function(F, G, ndata, nsim=100, parametric) { check.1.integer(nsim) stopifnot(nsim > 1) if(parametric) F <- Fpredicted(F, G) Finv <- inversefunction(F) zout <- sout <- numeric(nsim) zz <- get("x", envir=environment(G)) for(isim in 1:nsim) { zsim <- Finv(runif(ndata)) Fhat <- ecdf(zsim) iopt <- which.max(Fhat(zz) - G(zz)) zopt <- zz[iopt] sopt <- G(zopt) zout[isim] <- zopt sout[isim] <- sopt } return(data.frame(z=zout, s=sout)) } quantCI <- function(x, xest, confidence=0.95) { xleft <- quantile(x[x<=xest], 1-confidence) xright <- quantile(x[x>=xest], confidence) achieved <- mean(x >= xleft & x <= xright) return(c(lo=unname(xleft), hi=unname(xright), conf=achieved)) } thresholdCI }) spatstat.explore/R/Kest.R0000644000176200001440000010421714611073307015033 0ustar liggesusers# # Kest.R Estimation of K function # # $Revision: 5.139 $ $Date: 2022/06/30 07:49:47 $ # # # -------- functions ---------------------------------------- # Kest() compute estimate of K # using various edge corrections # # # -------- standard arguments ------------------------------ # X point pattern (of class 'ppp') # # r distance values at which to compute K # # -------- standard output ------------------------------ # A data frame (class "fv") with columns named # # r: same as input # # trans: K function estimated by translation correction # # iso: K function estimated by Ripley isotropic correction # # theo: K function for Poisson ( = pi * r ^2 ) # # border: K function estimated by border method # using standard formula (denominator = count of points) # # bord.modif: K function estimated by border method # using modified formula # (denominator = area of eroded window # # ------------------------------------------------------------------------ "Lest" <- function(X, ..., correction) { if(missing(correction)) correction <- NULL K <- Kest(X, ..., correction=correction) L <- eval.fv(sqrt(K/pi), dotonly=FALSE) # handle variance estimates if(any(varcols <- colnames(K) %in% c("rip", "ls"))) { r <- with(L, .x) L[,varcols] <- as.data.frame(K)[,varcols]/(2 * pi * r)^2 # fix 0/0 n <- npoints(X) A <- area(Window(X)) if(any(colnames(K) == "rip")) L[r == 0, "rip"] <- (2 * A/(n-1)^2)/(4 * pi) if(any(colnames(K) == "ls")) L[r == 0, "ls"] <- (2 * A/(n * (n-1)))/(4 * pi) } # relabel the fv object L <- rebadge.fv(L, 
quote(L(r)), "L", names(K), new.labl=attr(K, "labl")) # return(L) } "Kest"<- function(X, ..., r=NULL, rmax=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), nlarge=3000, domain=NULL, var.approx=FALSE, ratio=FALSE) { verifyclass(X, "ppp") nlarge.given <- !missing(nlarge) && !is.null(nlarge) rfixed <- !is.null(r) || !is.null(breaks) npts <- npoints(X) npairs <- npts * (npts - 1) W <- Window(X) areaW <- area(W) lambda <- npts/areaW lambda2 <- npairs/(areaW^2) lambda2area <- npairs/areaW samplesize <- npairs if(!is.null(domain)) { ## estimate based on contributions from a subdomain domain <- as.owin(domain) if(!is.subset.owin(domain, W)) stop(paste(dQuote("domain"), "is not a subset of the window of X")) ## use code in Kdot/Kmulti indom <- factor(inside.owin(X$x, X$y, domain), levels=c(FALSE,TRUE)) Kd <- Kdot(X %mark% indom, i="TRUE", r=r, breaks=breaks, correction=correction, ratio=ratio, rmax=rmax, domainI=domain) # relabel and exit Kd <- rebadge.fv(Kd, quote(K(r)), "K") return(Kd) } rmaxdefault <- rmax %orifnull% rmax.rule("K", W, lambda) if(is.infinite(rmaxdefault)) rmaxdefault <- diameter(W) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", rigid="rigid", periodic="periodic", good="good", best="best"), multi=TRUE) # best.wanted <- ("best" %in% correction) # replace 'good' by the optimal choice for this size of dataset if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) # retain only corrections that are implemented for the window correction <- implemented.for.K(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) ########################################### # Efficient code for border correction and no correction # Usable only if r values are evenly spaced from 0 to rmax # Invoked automatically if number of points is large can.do.fast <- breaks$even large.n <- (npts >= nlarge) # demand.best <- correction.given && best.wanted large.n.trigger <- large.n && !correction.given fastcorrections <- c("border", "bord.modif", "none") fastdefault <- "border" correction.fast <- all(correction %in% fastcorrections) will.do.fast <- can.do.fast && (correction.fast || large.n.trigger) asked <- correction.fast || (nlarge.given && large.n.trigger) if(asked && !can.do.fast) warning("r values not evenly spaced - cannot use efficient code") if(will.do.fast) { # determine correction(s) ok <- correction %in% fastcorrections correction <- if(any(ok)) correction[ok] else fastdefault bord <- any(correction %in% c("border", "bord.modif")) none <- any(correction =="none") if(!all(ok)) { # some corrections were overridden; notify user corx <- c(if(bord) "border correction estimate" else NULL, if(none) "uncorrected estimate" else NULL) corx <- paste(corx, collapse=" and ") message(paste("number of data points exceeds", nlarge, "- computing", corx , "only")) } # restrict r values to recommended range, unless specifically requested if(!rfixed) r <- seq(from=0, to=alim[2], length.out=length(r)) if(bord) Kb <- Kborder.engine(X, max(r), length(r), correction, ratio=ratio) 
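    # these estimates are computed by the efficient algorithms Kborder.engine
    # and Knone.engine (defined below), which require the r values to be
    # evenly spaced from 0 to rmax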
if(none) Kn <- Knone.engine(X, max(r), length(r), ratio=ratio) if(bord && none) { Kn <- Kn[ , names(Kn) != "theo"] yn <- fvnames(Kb, ".y") Kbn <- if(!ratio) bind.fv(Kb, Kn, preferred=yn) else bind.ratfv(Kb, Kn, preferred=yn) return(Kbn) } if(bord) return(Kb) if(none) return(Kn) } unsupported.Krect <- c("rigid", "periodic") do.fast.rectangle <- can.do.fast && is.rectangle(W) && spatstat.options("use.Krect") && !any(correction %in% unsupported.Krect) if(do.fast.rectangle) { ########################################### ## Fast code for rectangular window ########################################### K <- Krect.engine(X, rmax, length(r), correction, ratio=ratio) attr(K, "alim") <- alim } else { ########################################### ## Slower code ########################################### ## this will be the output data frame Kdf <- data.frame(r=r, theo = pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") K <- ratfv(Kdf, NULL, npairs, "r", quote(K(r)), "theo", NULL, alim, c("r","%s[pois](r)"), desc, fname="K", ratio=ratio) ## Identify all close pairs up to distance 'rmax' rmax <- max(r) if(all(correction == "periodic")) { ## not needed in periodic case ## Assign null value to placate package checker close <- DIJ <- NULL } else { ## usual case ## Identify all close pairs needxy <- correction %in% c("translate", "isotropic") what <- if(any(needxy)) "all" else "ijd" close <- closepairs(X, rmax, what=what) DIJ <- close$d } ## precompute set covariance of window gW <- NULL if(any(correction %in% c("translate", "rigid", "isotropic"))) gW <- setcov(W) if(any(correction == "none")) { ## uncorrected! For demonstration purposes only! wh <- whist(DIJ, breaks$val) # no weights Kun <- cumsum(wh)/lambda2area ## uncorrected estimate of K K <- bind.ratfv(K, numerator = NULL, quotient = data.frame(un=Kun), denominator = npairs, labl = "hat(%s)[un](r)", desc = "uncorrected estimate of %s", preferred = "un", ratio=ratio) } if(any(correction == "periodic")) { ## periodic correction ## Find close pairs of points in periodic distance closeP <- closepairs(X, rmax, what="ijd", periodic=TRUE) DIJP <- closeP$d ## Compute unweighted histogram wh <- whist(DIJP, breaks$val) Kper <- cumsum(wh)/lambda2area ## periodic correction estimate of K K <- bind.ratfv(K, numerator = NULL, quotient = data.frame(per=Kper), denominator = npairs, labl = "hat(%s)[per](r)", desc = "periodic-corrected estimate of %s", preferred = "per", ratio=ratio) } if(any(correction == "border" | correction == "bord.modif")) { ## border method ## Compute distances to boundary b <- bdist.points(X) I <- close$i bI <- b[I] ## apply reduced sample algorithm RS <- Kount(DIJ, bI, b, breaks) if(any(correction == "bord.modif")) { ## modified border correction denom.area <- eroded.areas(W, r) Kbm <- RS$numerator/(lambda2 * denom.area) samplesizeKbm <- npairs * (denom.area/areaW) K <- bind.ratfv(K, numerator = NULL, quotient = data.frame(bord.modif=Kbm), denominator = samplesizeKbm, labl = "hat(%s)[bordm](r)", desc = "modified border-corrected estimate of %s", preferred = "bord.modif", ratio=ratio) } if(any(correction == "border")) { Kb <- RS$numerator/(lambda * RS$denom.count) samplesizeKb <- (npts-1) * RS$denom.count K <- bind.ratfv(K, numerator = NULL, quotient = data.frame(border=Kb), denominator = samplesizeKb, labl = "hat(%s)[bord](r)", desc = "border-corrected estimate of %s", preferred = "border", ratio=ratio) } } if(any(correction == "translate")) { ## Ohser-Stoyan translation correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, 
W=W, paired=TRUE, gW = gW, give.rmax=TRUE) wh <- whist(DIJ, breaks$val, edgewt) Ktrans <- cumsum(wh)/lambda2area h <- attr(edgewt, "rmax") Ktrans[r >= h] <- NA K <- bind.ratfv(K, numerator = NULL, quotient = data.frame(trans=Ktrans), denominator = npairs, labl = "hat(%s)[trans](r)", desc = "translation-corrected estimate of %s", preferred = "trans", ratio=ratio) } if(any(correction == "rigid")) { ## Ohser-Stoyan rigid motion correction CW <- rotmean(gW) edgewt <- areaW/as.function(CW)(DIJ) wh <- whist(DIJ, breaks$val, edgewt) Krigid <- cumsum(wh)/lambda2area h <- rmax.Rigid(X, gW) #sic: X not W Krigid[r >= h] <- NA K <- bind.ratfv(K, numerator = NULL, quotient = data.frame(rigid=Krigid), denominator = npairs, labl = "hat(%s)[rigid](r)", desc = "rigid motion-corrected estimate of %s", preferred = "rigid", ratio=ratio) } if(any(correction == "isotropic")) { ## Ripley isotropic correction XI <- ppp(close$xi, close$yi, window=W, check=FALSE) edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) wh <- whist(DIJ, breaks$val, edgewt) Kiso <- cumsum(wh)/lambda2area h <- boundingradius(W) Kiso[r >= h] <- NA K <- bind.ratfv(K, numerator = NULL, quotient = data.frame(iso=Kiso), denominator = npairs, labl = "hat(%s)[iso](r)", desc = "Ripley isotropic correction estimate of %s", preferred = "iso", ratio=ratio) } } ############################# ## VARIANCE APPROXIMATION ############################# if(var.approx && !any(correction == "isotropic")) { warn.once("varapproxiso", "Ignored argument 'var.approx=TRUE'; the variance approximation", "is available only for the isotropic correction") var.approx <- FALSE } if(var.approx) { ## Compute variance approximations A <- areaW P <- perimeter(W) n <- npts ## Ripley asymptotic approximation rip <- 2 * ((A/(n-1))^2) * (pi * r^2/A + 0.96 * P * r^3/A^2 + 0.13 * (n/A) * P * r^5/A^2) ## vsamplesize <- (npts - 1)^2 K <- bind.ratfv(K, numerator = NULL, quotient = data.frame(rip=rip), denominator = vsamplesize, labl = "vR(r)", desc = "Ripley approximation to var(%s) under CSR", preferred = "iso", ratio=ratio) if(W$type == "rectangle") { ## Lotwick-Silverman a1r <- (0.21 * P * r^3 + 1.3 * r^4)/A^2 a2r <- (0.24 * P * r^5 + 2.62 * r^6)/A^3 ## contains correction to typo on p52 of Diggle 2003 ## cf Lotwick & Silverman 1982 eq (5) br <- (pi * r^2/A) * (1 - pi * r^2/A) + (1.0716 * P * r^3 + 2.2375 * r^4)/A^2 vls <- (A^2) * (2 * br - a1r + (n-2) * a2r)/(n*(n-1)) ## add column K <- bind.ratfv(K, numerator = NULL, quotient = data.frame(ls=vls), denominator = vsamplesize, "vLS(r)", "Lotwick-Silverman approx to var(%s) under CSR", "iso", ratio=ratio) } } ### FINISH OFF ##### ## default plot will display all edge corrections formula(K) <- . 
~ r nama <- rev(colnames(K)) fvnames(K, ".") <- setdiff(nama, c("r", "rip", "ls")) ## unitname(K) <- unitname(X) # copy to other components if(ratio) K <- conform.ratfv(K) return(K) } ################################################################ ############# SUPPORTING ALGORITHMS ########################### ################################################################ Kount <- function(dIJ, bI, b, breaks) { # # "internal" routine to compute border-correction estimate of K or Kij # # dIJ: vector containing pairwise distances for selected I,J pairs # bI: corresponding vector of boundary distances for I # b: vector of ALL distances to window boundary # # breaks : breakpts object # stopifnot(length(dIJ) == length(bI)) # determine which distances d_{ij} were observed without censoring uncen <- (dIJ <= bI) # histogram of noncensored distances nco <- whist(dIJ[uncen], breaks$val) # histogram of censoring times for noncensored distances ncc <- whist(bI[uncen], breaks$val) # histogram of censoring times (yes, this is a different total size) cen <- whist(b, breaks$val) # count censoring times beyond rightmost breakpoint uppercen <- sum(b > max(breaks$val)) # go RS <- reduced.sample(nco, cen, ncc, show=TRUE, uppercen=uppercen) # extract results numerator <- RS$numerator denom.count <- RS$denominator # check if(length(numerator) != breaks$ncells) stop("internal error: length(numerator) != breaks$ncells") if(length(denom.count) != breaks$ncells) stop("internal error: length(denom.count) != breaks$ncells") return(list(numerator=numerator, denom.count=denom.count)) } #### interface to C code for border method Kborder.engine <- function(X, rmax, nr=100, correction=c("border", "bord.modif"), weights=NULL, ratio=FALSE) { verifyclass(X, "ppp") npts <- npoints(X) W <- as.owin(X) areaW <- area(W) lambda <- npts/areaW npairs <- npts * (npts - 1) lambda2 <- npairs/(areaW^2) lambda2area <- npairs/areaW if(missing(rmax)) rmax <- diameter(W)/4 r <- seq(from=0, to=rmax, length.out=nr) # this will be the output data frame Kdf <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") Kfv <- ratfv(Kdf, NULL, npairs, "r", quote(K(r)), "theo", . 
~ r, c(0,rmax), c("r","%s[pois](r)"), desc, fname="K", unitname=unitname(X), ratio=ratio) ####### start computing ############ # sort in ascending order of x coordinate orderX <- fave.order(X$x) Xsort <- X[orderX] x <- Xsort$x y <- Xsort$y # boundary distances b <- bdist.points(Xsort) # call the C code if(is.null(weights)) { # determine whether the numerator can be stored as an integer bigint <- .Machine$integer.max if(npts < sqrt(bigint)) { # yes - use faster integer arithmetic res <- .C(SE_KborderI, nxy=as.integer(npts), x=as.double(x), y=as.double(y), b=as.double(b), nr=as.integer(nr), rmax=as.double(rmax), numer=as.integer(integer(nr)), denom=as.integer(integer(nr)), PACKAGE="spatstat.explore") } else { # no - need double precision storage res <- .C(SE_KborderD, nxy=as.integer(npts), x=as.double(x), y=as.double(y), b=as.double(b), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), denom=as.double(numeric(nr)), PACKAGE="spatstat.explore") } if("bord.modif" %in% correction) { denom.area <- eroded.areas(W, r) Kbm <- res$numer/(lambda2 * denom.area) samplesizeKbm <- npairs * (denom.area/areaW) Kfv <- bind.ratfv(Kfv, numerator=NULL, quotient=data.frame(bord.modif=Kbm), denominator=samplesizeKbm, "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif", ratio=ratio) } if("border" %in% correction) { Kb <- res$numer/(lambda * res$denom) samplesizeKb <- (npts - 1) * res$denom Kfv <- bind.ratfv(Kfv, numerator=NULL, quotient=data.frame(border=Kb), denominator=samplesizeKb, "hat(%s)[bord](r)", "border-corrected estimate of %s", "border", ratio=ratio) } } else { ## weighted version if(is.numeric(weights)) { if(length(weights) != X$n) stop("length of weights argument does not match number of points in X") } else { wim <- as.im(weights, W) weights <- wim[X, drop=FALSE] if(anyNA(weights)) stop("domain of weights image does not contain all points of X") } weights.Xsort <- weights[orderX] res <- .C(SE_Kwborder, nxy=as.integer(npts), x=as.double(x), y=as.double(y), w=as.double(weights.Xsort), b=as.double(b), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), denom=as.double(numeric(nr)), PACKAGE="spatstat.explore") if("border" %in% correction) { numKb <- res$numer denKb <- res$denom Kfv <- bind.ratfv(Kfv, numerator=data.frame(border=numKb), denominator=data.frame(border=denKb), "hat(%s)[bord](r)", "border-corrected estimate of %s", "border", ratio=ratio) } if("bord.modif" %in% correction) { numKbm <- res$numer denKbm <- eroded.areas(W, r) Kfv <- bind.ratfv(Kfv, numerator=data.frame(bord.modif=numKbm), denominator=data.frame(bord.modif=denKbm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif", ratio=ratio) } } return(Kfv) } Knone.engine <- function(X, rmax, nr=100, weights=NULL, ratio=FALSE) { verifyclass(X, "ppp") npts <- npoints(X) W <- as.owin(X) areaW <- area(W) ## lambda <- npts/areaW npairs <- npts * (npts - 1) lambda2 <- npairs/(areaW^2) lambda2area <- npairs/areaW if(missing(rmax)) rmax <- diameter(W)/4 r <- seq(from=0, to=rmax, length.out=nr) # this will be the output data frame Kdf <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") Kfv <- ratfv(Kdf, NULL, npairs, "r", quote(K(r)), "theo", . 
~ r, c(0,rmax), c("r","%s[pois](r)"), desc, fname="K", unitname=unitname(X), ratio=ratio) ####### start computing ############ # sort in ascending order of x coordinate orderX <- fave.order(X$x) Xsort <- X[orderX] x <- Xsort$x y <- Xsort$y # call the C code if(is.null(weights)) { ## determine whether the numerator can be stored as an integer bigint <- .Machine$integer.max if(npts < sqrt(bigint)) { ## yes - use faster integer arithmetic res <- .C(SE_KnoneI, nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), numer=as.integer(integer(nr)), PACKAGE="spatstat.explore") } else { ## no - need double precision storage res <- .C(SE_KnoneD, nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), PACKAGE="spatstat.explore") } Kun <- res$numer/lambda2area samplesizeKun <- npairs } else { ## weighted version if(is.numeric(weights)) { if(length(weights) != npts) stop("length of weights argument does not match number of points in X") } else { wim <- as.im(weights, W) weights <- wim[X, drop=FALSE] if(anyNA(weights)) stop("domain of weights image does not contain all points of X") } weights.Xsort <- weights[orderX] res <- .C(SE_Kwnone, nxy=as.integer(npts), x=as.double(x), y=as.double(y), w=as.double(weights.Xsort), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), PACKAGE="spatstat.explore") samplesizeKun <- totwt <- sum(weights) Kun <- res$numer/totwt } # tack on to fv object Kfv <- bind.ratfv(Kfv, numerator=NULL, quotient=data.frame(un=Kun), denominator=samplesizeKun, "hat(%s)[un](r)", "uncorrected estimate of %s", "un", ratio=ratio) return(Kfv) } rmax.rule <- function(fun="K", W, lambda) { if(gotW <- !missing(W)) verifyclass(W, "owin") if(gotL <- !missing(lambda)) lambda <- as.numeric(lambda) # can be vector gotall <- gotW && gotL switch(fun, K = { ## Ripley's Rule ripley <- if(gotW) shortside(Frame(W))/4 else Inf ## Count at most 1000 neighbours per point rlarge <- if(gotL) sqrt(1000 /(pi * lambda)) else Inf rmax <- min(rlarge, ripley) }, Kscaled = { ## rule of thumb for Kscaled rdiam <- if(gotall) diameter(Frame(W))/2 * sqrt(lambda) else Inf rmax <- min(10, rdiam) }, F = , G = , J = { # rule of thumb rdiam <- if(gotW) diameter(Frame(W))/2 else Inf # Poisson process has F(rlarge) = 1 - 10^(-5) rlarge <- if(gotL) sqrt(log(1e5)/(pi * lambda)) else Inf rmax <- min(rlarge, rdiam) }, stop(paste("Unrecognised function type", sQuote(fun))) ) return(rmax) } implemented.for.K <- function(correction, windowtype, explicit) { if(any(b <- (correction == "best"))) { ## replace 'best' by the best available correction correction[b] <- switch(windowtype, mask="translate", "isotropic") } whinge <- NULL if(windowtype != "rectangle" && any(pe <- (correction == "periodic"))) { whinge <- "Periodic correction is not defined for non-rectangular windows" correction <- correction[!pe] } if(windowtype == "mask" && any(iso <- (correction == "iso"))) { whinge <- pasteN(whinge, "Isotropic correction is not implemented for binary mask windows", collapse=" and ") correction <- correction[!iso] } if(explicit && !is.null(whinge)) { if(length(correction)) { ## some desired corrections remain; warn about the deleted ones warning(whinge, call.=FALSE) } else { ## none of the desired corrections are supported stop(whinge, call.=FALSE) } } return(correction) } good.correction.K <- function(X) { nX <- npoints(X) W <- as.owin(X) avail <- c("none", if(nX < 1e5) "border" else NULL, if(nX < 3000)"translate" else 
NULL, if(nX < 1000 && !is.mask(W)) "isotropic" else NULL) chosen <- rev(avail)[1] return(chosen) } Krect.engine <- function(X, rmax, nr=100, correction, weights=NULL, ratio=FALSE, fname="K", use.integers=TRUE) { verifyclass(X, "ppp") npts <- npoints(X) W <- as.owin(X) areaW <- area(W) width <- sidelengths(W)[1] height <- sidelengths(W)[2] lambda <- npts/areaW npairs <- npts * (npts - 1) lambda2 <- npairs/(areaW^2) lambda2area <- npairs/areaW if(missing(rmax)) rmax <- diameter(W)/4 r <- seq(from=0, to=rmax, length.out=nr) if(weighted <- !is.null(weights)) { ## coerce weights to a vector if(is.numeric(weights)) { check.nvector(weights, npts, vname="weights") } else { wim <- as.im(weights, W) weights <- wim[X, drop=FALSE] if(anyNA(weights)) stop("domain of weights image does not contain all points of X") } totalweight <- sum(weights) } # this will be the output data frame Kdf <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") denom <- if(weighted) areaW else (lambda2 * areaW) Kfv <- ratfv(Kdf, NULL, denom, "r", quote(K(r)), "theo", . ~ r, c(0,rmax), c("r", makefvlabel(NULL, NULL, fname, "pois")), desc, fname=fname, unitname=unitname(X), ratio=ratio) ####### prepare data ############ if(!all(correction == "translate")) { ## Ensure rectangle has its bottom left corner at the origin if(W$xrange[1] != 0 || W$yrange[1] != 0) { X <- shift(X, origin="bottomleft") W <- as.owin(X) } } ## sort in ascending order of x coordinate orderX <- fave.order(X$x) x <- X$x[orderX] y <- X$y[orderX] if(weighted) wt <- weights[orderX] ## establish algorithm parameters doIso <- "isotropic" %in% correction doTrans <- "translate" %in% correction doBord <- any(c("border", "bord.modif") %in% correction) doUnco <- "none" %in% correction trimedge <- spatstat.options("maxedgewt") ## allocate space for results ziso <- numeric(if(doIso) nr else 1L) ztrans <- numeric(if(doTrans) nr else 1L) ## call the C code if(weighted) { ## weighted version zbnumer <- numeric(if(doBord) nr else 1L) zbdenom <- numeric(if(doBord) nr else 1L) zunco <- numeric(if(doUnco) nr else 1L) res <- .C(SE_KrectWtd, width=as.double(width), height=as.double(height), nxy=as.integer(npts), x=as.double(x), y=as.double(y), w=as.double(wt), nr=as.integer(nr), rmax=as.double(rmax), trimedge=as.double(trimedge), doIso=as.integer(doIso), doTrans=as.integer(doTrans), doBord=as.integer(doBord), doUnco=as.integer(doUnco), iso=as.double(ziso), trans=as.double(ztrans), bnumer=as.double(zbnumer), bdenom=as.double(zbdenom), unco=as.double(zunco), PACKAGE="spatstat.explore") } else if(use.integers && npts < sqrt(.Machine$integer.max)) { ## unweighted ## numerator of border correction can be stored as an integer ## use faster integer arithmetic zbnumer <- integer(if(doBord) nr else 1L) zbdenom <- integer(if(doBord) nr else 1L) zunco <- integer(if(doUnco) nr else 1L) res <- .C(SE_KrectInt, width=as.double(width), height=as.double(height), nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), trimedge=as.double(trimedge), doIso=as.integer(doIso), doTrans=as.integer(doTrans), doBord=as.integer(doBord), doUnco=as.integer(doUnco), iso=as.double(ziso), trans=as.double(ztrans), bnumer=as.integer(zbnumer), bdenom=as.integer(zbdenom), unco=as.integer(zunco), PACKAGE="spatstat.explore") } else { ## unweighted ## need double precision storage zbnumer <- numeric(if(doBord) nr else 1L) zbdenom <- numeric(if(doBord) nr else 1L) zunco <- numeric(if(doUnco) nr else 1L) res <- .C(SE_KrectDbl, 
width=as.double(width), height=as.double(height), nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), trimedge=as.double(trimedge), doIso=as.integer(doIso), doTrans=as.integer(doTrans), doBord=as.integer(doBord), doUnco=as.integer(doUnco), iso=as.double(ziso), trans=as.double(ztrans), bnumer=as.double(zbnumer), bdenom=as.double(zbdenom), unco=as.double(zunco), PACKAGE="spatstat.explore") } ## Process corrections in reverse order of priority ## Uncorrected estimate if("none" %in% correction) { if(!weighted) { Kun <- res$unco/lambda2area samplesizeKun <- npairs } else { Kun <- res$unco/areaW samplesizeKun <- totalweight } Kfv <- bind.ratfv(Kfv, numerator=NULL, quotient = data.frame(un=Kun), denominator=samplesizeKun, makefvlabel(NULL, "hat", fname, "un"), "uncorrected estimate of %s", "un", ratio=ratio) } ## Modified border correction if("bord.modif" %in% correction) { denom.area <- eroded.areas(W, r) if(!weighted) { Kbm <- res$bnumer/lambda2area samplesizeKbm <- npairs * (denom.area/areaW) } else { Kbm <- res$bnumer/denom.area samplesizeKbm <- denom.area } Kfv <- bind.ratfv(Kfv, numerator=NULL, quotient=data.frame(bord.modif=Kbm), denominator=samplesizeKbm, makefvlabel(NULL, "hat", fname, "bordm"), "modified border-corrected estimate of %s", "bord.modif", ratio=ratio) } ## Border correction if("border" %in% correction) { if(!weighted) { Kb <- res$bnumer/(lambda * res$bdenom) samplesizeKb <- (npts - 1) * res$bdenom } else { Kb <- res$bnumer/res$bdenom samplesizeKb <- res$bdenom } Kfv <- bind.ratfv(Kfv, numerator=NULL, quotient=data.frame(border=Kb), denominator=samplesizeKb, makefvlabel(NULL, "hat", fname, "bord"), "border-corrected estimate of %s", "border", ratio=ratio) } ## translation correction if("translate" %in% correction) { if(!weighted) { Ktrans <- res$trans/lambda2area samplesizeKtrans <- npairs } else { Ktrans <- res$trans/areaW samplesizeKtrans <- areaW } h <- diameter(as.rectangle(W))/2 Ktrans[r >= h] <- NA Kfv <- bind.ratfv(Kfv, numerator=NULL, quotient=data.frame(trans=Ktrans), denominator=samplesizeKtrans, makefvlabel(NULL, "hat", fname, "trans"), "translation-corrected estimate of %s", "trans", ratio=ratio) } ## isotropic correction if("isotropic" %in% correction) { if(!weighted) { Kiso <- res$iso/lambda2area samplesizeKiso <- npairs } else { Kiso <- res$iso/areaW samplesizeKiso <- areaW } h <- diameter(as.rectangle(W))/2 Kiso[r >= h] <- NA Kfv <- bind.ratfv(Kfv, numerator=NULL, quotient=data.frame(iso=Kiso), denominator=samplesizeKiso, makefvlabel(NULL, "hat", fname, "iso"), "isotropic-corrected estimate of %s", "iso", ratio=ratio) } ## return(Kfv) } spatstat.explore/R/fgk3.R0000644000176200001440000003773414611073310014762 0ustar liggesusers# # $Revision: 1.29 $ $Date: 2022/05/21 08:53:38 $ # # Estimates of F, G and K for three-dimensional point patterns # # # ............ user interface ............................. 
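#
# Illustrative sketch (not run): typical use of the user-interface functions
# defined below on a three-dimensional point pattern. The unit-box pattern is
# hypothetical example data, not part of the package; the call is wrapped in
# if(FALSE) so it is never executed when the package is built or loaded.
#
if(FALSE) {
  X3 <- pp3(runif(50), runif(50), runif(50), box3(c(0,1), c(0,1), c(0,1)))
  K3 <- K3est(X3, correction="translation")   # 3D K function estimate
  g3 <- pcf3est(X3)                            # 3D pair correlation function
  plot(K3)
}
#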
# K3est <- function(X, ..., rmax=NULL, nrval=128, correction=c("translation", "isotropic"), ratio=FALSE) { stopifnot(inherits(X, "pp3")) correction <- pickoption("correction", correction, c(translation="translation", trans="translation", isotropic="isotropic", iso="isotropic", best="isotropic"), multi=TRUE) trap.extra.arguments(..., .Context="In K3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) np <- npoints(X) denom <- np * (np-1)/volume(B) # this will be the output data frame K <- data.frame(r=r, theo= (4/3) * pi * r^3) desc <- c("distance argument r", "theoretical Poisson %s") K <- ratfv(K, NULL, denom, "r", quote(K[3](r)), "theo", NULL, c(0,rmax/2), c("r","{%s[%s]^{pois}}(r)"), desc, fname=c("K", "3"), ratio=ratio) # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # extract coordinates coo <- coords(X) if(any(correction %in% "translation")) { u <- k3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="translation") K <- bind.ratfv(K, data.frame(trans=u$num), u$denom, "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans", ratio=ratio) } if(any(correction %in% "isotropic")) { u <- k3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="isotropic") K <- bind.ratfv(K, data.frame(iso=u$num), u$denom, "{hat(%s)[%s]^{iso}}(r)", "isotropic-corrected estimate of %s", "iso", ratio=ratio) } # default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) return(K) } G3est <- function(X, ..., rmax=NULL, nrval=128, correction=c("rs", "km", "Hanisch")) { stopifnot(inherits(X, "pp3")) correction <- pickoption("correction", correction, c(rs="rs", border="rs", km="km", KM="km", Hanisch="han", hanisch="han", best="km"), multi=TRUE) trap.extra.arguments(..., .Context="In G3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) coo <- coords(X) lambda <- nrow(coo)/volume(B) # this will be the output data frame G <- data.frame(r=r, theo= 1 - exp( - lambda * (4/3) * pi * r^3)) desc <- c("distance argument r", "theoretical Poisson %s") G <- fv(G, "r", substitute(G3(r), NULL), "theo", , c(0,rmax/2), c("r","%s[pois](r)"), desc, fname="G3") # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # collect four histograms for censored data u <- g3Cengine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval) if("rs" %in% correction) G <- bind.fv(G, data.frame(rs=u$rs), "%s[rs](r)", "reduced sample estimate of %s", "rs") if("km" %in% correction) G <- bind.fv(G, data.frame(km=u$km), "%s[km](r)", "Kaplan-Meier estimate of %s", "km") if("han" %in% correction) G <- bind.fv(G, data.frame(han=u$han), "%s[han](r)", "Normalised Hanisch estimate of %s", "han") # default is to display them all formula(G) <- . 
~ r unitname(G) <- unitname(X) return(G) } F3est <- function(X, ..., rmax=NULL, nrval=128, vside=NULL, correction=c("rs", "km", "cs"), sphere=c("fudge", "ideal", "digital")) { stopifnot(inherits(X, "pp3")) sphere <- match.arg(sphere) correction <- pickoption("correction", correction, c(rs="rs", border="rs", km="km", KM="km", Kaplan="km", cs="cs", CS="cs", best="km"), multi=TRUE) trap.extra.arguments(..., .Context="In F3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) coo <- coords(X) vol <- volume(B) lambda <- nrow(coo)/vol # determine voxel size if(missing(vside)) { voxvol <- vol/spatstat.options("nvoxel") vside <- voxvol^(1/3) # ensure the shortest side is a whole number of voxels s <- shortside(B) m <- ceiling(s/vside) vside <- s/m } # compute theoretical value switch(sphere, ideal = { volsph <- (4/3) * pi * r^3 spherename <- "ideal sphere" }, fudge = { volsph <- 0.78 * (4/3) * pi * r^3 spherename <- "approximate sphere" }, digital = { volsph <- digital.volume(c(0, rmax), nrval, vside) spherename <- "digital sphere" }) theo.desc <- paste("theoretical Poisson %s using", spherename) # this will be the output data frame FF <- data.frame(r = r, theo = 1 - exp( - lambda * volsph)) desc <- c("distance argument r", theo.desc) labl <- c("r","%s[pois](r)") FF <- fv(FF, "r", substitute(F3(r), NULL), "theo", , c(0,rmax/2), labl, desc, fname="F3") # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # go u <- f3Cengine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, vside=vside) if("rs" %in% correction) FF <- bind.fv(FF, data.frame(rs=u$rs), "%s[rs](r)", "reduced sample estimate of %s", "rs") if("km" %in% correction) FF <- bind.fv(FF, data.frame(km=u$km), "%s[km](r)", "Kaplan-Meier estimate of %s", "km") if("cs" %in% correction) FF <- bind.fv(FF, data.frame(cs=u$cs), "%s[cs](r)", "Chiu-Stoyan estimate of %s", "cs") # default is to display them all formula(FF) <- . 
~ r unitname(FF) <- unitname(X) return(FF) } pcf3est <- function(X, ..., rmax=NULL, nrval=128, correction=c("translation", "isotropic"), delta=NULL, adjust=1, biascorrect=TRUE) { stopifnot(inherits(X, "pp3")) correction <- pickoption("correction", correction, c(translation="translation", trans="translation", isotropic="isotropic", iso="isotropic", best="isotropic"), multi=TRUE) trap.extra.arguments(..., .Context="In pcf3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) if(is.null(delta)) { lambda <- npoints(X)/volume(B) delta <- adjust * 0.26/lambda^(1/3) } if(biascorrect) { # bias correction rondel <- r/delta biasbit <- ifelseAX(rondel > 1, 1, (3/4)*(rondel + 2/3 - (1/3)*rondel^3)) } # this will be the output data frame g <- data.frame(r=r, theo=rep.int(1, length(r))) desc <- c("distance argument r", "theoretical Poisson %s") g <- fv(g, "r", quote(g[3](r)), "theo", , c(0,rmax/2), c("r", "{%s[%s]^{pois}}(r)"), desc, fname=c("g", "3")) # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # extract coordinates coo <- coords(X) if(any(correction %in% "translation")) { u <- pcf3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="translation", delta=delta) gt <- u$f if(biascorrect) gt <- gt/biasbit g <- bind.fv(g, data.frame(trans=gt), "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans") } if(any(correction %in% "isotropic")) { u <- pcf3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="isotropic", delta=delta) gi <- u$f if(biascorrect) gi <- gi/biasbit g <- bind.fv(g, data.frame(iso=gi), "{hat(%s)[%s]^{iso}}(r)", "isotropic-corrected estimate of %s", "iso") } # default is to display them all formula(g) <- . ~ r unitname(g) <- unitname(X) attr(g, "delta") <- delta return(g) } # ............ low level code .............................. 
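#
# Note on the engines below: each takes the point coordinates as plain numeric
# vectors together with the box as a flat length-6 vector
# c(xmin, xmax, ymin, ymax, zmin, zmax) (constructed above via unlist(B[1:3])),
# and returns the numerator and denominator of the estimate on a grid of
# 'nrval' equally spaced r values from 0 to 'rmax'. Hypothetical sketch,
# not run and not part of the package code:
#
if(FALSE) {
  xx <- runif(40); yy <- runif(40); zz <- runif(40)
  u <- k3engine(xx, yy, zz, box=c(0,1,0,1,0,1), rmax=0.5, nrval=64,
                correction="translation")
  Khat <- u$num/u$denom   # translation-corrected estimate at seq(0, 0.5, length.out=64)
}
#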
# k3engine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=100, correction="translation") { code <- switch(correction, translation=0, isotropic=1) res <- .C(SE_RcallK3, as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(0), as.double(rmax), as.integer(nrval), f = as.double(numeric(nrval)), num = as.double(numeric(nrval)), denom = as.double(numeric(nrval)), as.integer(code), PACKAGE="spatstat.explore") return(list(range = c(0,rmax), f = res$f, num=res$num, denom=res$denom, correction=correction)) } # # # g3engine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=10, correction="Hanisch G3") { code <- switch(correction, "minus sampling"=1, "Hanisch G3"=3) res <- .C(SE_RcallG3, as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(0), as.double(rmax), as.integer(nrval), f = as.double(numeric(nrval)), num = as.double(numeric(nrval)), denom = as.double(numeric(nrval)), as.integer(code), PACKAGE="spatstat.explore") return(list(range = c(0, rmax), f = res$f, num=res$num, denom=res$denom, correction=correction)) } # # f3engine <- function(x, y, z, box=c(0,1,0,1,0,1), vside=0.05, range=c(0,1.414), nval=25, correction="minus sampling") { # code <- switch(correction, "minus sampling"=1, no=0) res <- .C(SE_RcallF3, as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(vside), as.double(range[1L]), as.double(range[2L]), m=as.integer(nval), num = as.integer(integer(nval)), denom = as.integer(integer(nval)), as.integer(code), PACKAGE="spatstat.explore") r <- seq(from=range[1L], to=range[2L], length.out=nval) f <- with(res, ifelseXB(denom > 0, num/denom, 1)) return(list(r = r, f = f, num=res$num, denom=res$denom, correction=correction)) } f3Cengine <- function(x, y, z, box=c(0,1,0,1,0,1), vside=0.05, rmax=1, nrval=25) { # res <- .C(SE_RcallF3cen, as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(vside), as.double(0), as.double(rmax), m=as.integer(nrval), obs = as.integer(integer(nrval)), nco = as.integer(integer(nrval)), cen = as.integer(integer(nrval)), ncc = as.integer(integer(nrval)), upperobs = as.integer(integer(1L)), uppercen = as.integer(integer(1L)), PACKAGE="spatstat.explore") r <- seq(from=0, to=rmax, length.out=nrval) # obs <- res$obs nco <- res$nco cen <- res$cen ncc <- res$ncc upperobs <- res$upperobs uppercen <- res$uppercen # breaks <- breakpts.from.r(r) km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) # ero <- eroded.volumes(as.box3(box), r) H <- cumsum(nco/ero) cs <- H/max(H[is.finite(H)]) # return(list(rs=rs, km=km$km, hazard=km$lambda, cs=cs, r=r)) } g3Cengine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=25) { # res <- .C(SE_RcallG3cen, as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(0), as.double(rmax), m=as.integer(nrval), obs = as.integer(integer(nrval)), nco = as.integer(integer(nrval)), cen = 
as.integer(integer(nrval)), ncc = as.integer(integer(nrval)), upperobs = as.integer(integer(1L)), uppercen = as.integer(integer(1L)), PACKAGE="spatstat.explore") r <- seq(from=0, to=rmax, length.out=nrval) # obs <- res$obs nco <- res$nco cen <- res$cen ncc <- res$ncc upperobs <- res$upperobs uppercen <- res$uppercen # breaks <- breakpts.from.r(r) km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) # ero <- eroded.volumes(as.box3(box), r) H <- cumsum(nco/ero) han <- H/max(H[is.finite(H)]) return(list(rs=rs, km=km$km, hazard=km$lambda, han=han, r=r)) } pcf3engine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=100, correction="translation", delta=rmax/10) { code <- switch(correction, translation=0, isotropic=1) res <- .C(SE_Rcallpcf3, as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1L]), as.double(box[2L]), as.double(box[3L]), as.double(box[4L]), as.double(box[5L]), as.double(box[6L]), as.double(0), as.double(rmax), as.integer(nrval), f = as.double(numeric(nrval)), num = as.double(numeric(nrval)), denom = as.double(numeric(nrval)), method=as.integer(code), delta=as.double(delta), PACKAGE="spatstat.explore") return(list(range = c(0,rmax), f = res$f, num=res$num, denom=res$denom, correction=correction)) } # # ------------------------------------------------------------ # volume of a sphere (exact and approximate) # sphere.volume <- function(range=c(0,1.414), nval=10) { rr <- seq(from=range[1L], to=range[2L], length.out=nval) return( (4/3) * pi * rr^3) } digital.volume <- function(range=c(0, 1.414), nval=25, vside= 0.05) { # Calculate number of points in digital sphere # by performing distance transform for a single point # in the middle of a suitably large box # # This takes EIGHT TIMES AS LONG as the corresponding empirical F-hat !!! # w <- 2 * range[2L] + 2 * vside # dvol <- .C(SE_RcallF3, as.double(w/2), as.double(w/2), as.double(w/2), as.integer(1L), as.double(0), as.double(w), as.double(0), as.double(w), as.double(0), as.double(w), as.double(vside), as.double(range[1L]), as.double(range[2L]), as.integer(nval), num = as.integer(integer(nval)), denom = as.integer(integer(nval)), as.integer(0), PACKAGE="spatstat.explore")$num # (vside^3) * dvol } spatstat.explore/R/varblock.R0000644000176200001440000001307614611073311015725 0ustar liggesusers# # varblock.R # # Variance estimation using block subdivision # # $Revision: 1.22 $ $Date: 2022/01/04 05:30:06 $ # varblock <- local({ getrvalues <- function(z) { with(z, .x) } stepsize <- function(z) { mean(diff(z)) } dofun <- function(domain, fun, Xpp, ...) 
{ fun(Xpp, ..., domain=domain) } varblock <- function(X, fun=Kest, blocks=quadrats(X, nx=nx, ny=ny), ..., nx=3, ny=nx, confidence=0.95) { stopifnot(is.ppp(X)) stopifnot(is.tess(blocks)) stopifnot(is.function(fun) || is.character(fun)) if(is.character(fun)) fun <- get(fun, mode="function") ## validate confidence level stopifnot(confidence > 0.5 && confidence < 1) alpha <- 1 - confidence probs <- c(alpha/2, 1-alpha/2) ## determine whether 'fun' has an argument called 'domain' canrestrict <- ("domain" %in% names(formals(fun))) || samefunction(fun, pcf) || samefunction(fun, Lest) ## check there's at least one point in each block Y <- split(X, blocks) nums <- sapply(Y, npoints) blockok <- (nums > 0) if(some.zeroes <- any(!blockok)) warning("Some tiles contain no data: they are discarded") if(!canrestrict) { ## divide data into disjoint blocks if(some.zeroes) Y <- Y[blockok] n <- length(Y) if(n <= 1) stop("Need at least 2 blocks") ## apply 'fun' to each block if(any(c("r", "breaks") %in% names(list(...)))) { ## r vector specified fX <- fun(X, ...) z <- lapply(Y, fun, ...) } else { ## need to ensure compatible fv objects z <- lapply(Y, fun, ...) rlist <- lapply(z, getrvalues) rmax <- min(sapply(rlist, max)) rstep <- min(sapply(rlist, stepsize)) r <- seq(0, rmax, by=rstep) z <- lapply(Y, fun, ..., r=r) fX <- fun(X, ..., r=r) } } else { ## use 'domain' argument of 'fun' to compute contributions from each tile B <- tiles(blocks) if(some.zeroes) B <- B[blockok] n <- length(B) if(any(c("r", "breaks") %in% names(list(...)))) { ## r vector specified fX <- fun(X, ...) z <- lapply(B, dofun, ..., fun=fun, Xpp=X) } else { ## need to ensure compatible fv objects z <- lapply(B, dofun, ..., fun=fun, Xpp=X) rlist <- lapply(z, getrvalues) rmax <- min(sapply(rlist, max)) rstep <- min(sapply(rlist, stepsize)) r <- seq(0, rmax, by=rstep) z <- lapply(B, dofun, ..., fun=fun, Xpp=X, r=r) fX <- fun(X, ..., r=r) } } ## find columns that are common to all estimates zzz <- reconcile.fv(append(list(fX), z)) fX <- zzz[[1]] z <- zzz[-1] ## sample mean m <- meanlistfv(z) ## sample variance sqdev <- lapply(z, sqdev.fv, m=m) v <- meanlistfv(sqdev) v <- eval.fv(v * n/(n-1), dotonly=FALSE) ## sample standard deviation sd <- eval.fv(sqrt(v), dotonly=FALSE) ## upper and lower limits sem <- eval.fv(sd/sqrt(n), dotonly=FALSE) zcrit <- qnorm(probs) lower <- eval.fv(m + zcrit[1] * sem, dotonly=FALSE) upper <- eval.fv(m + zcrit[2] * sem, dotonly=FALSE) ## rebadge fva <- .Spatstat.FvAttrib fva <- fva[fva %in% names(attributes(fX))] attributes(m)[fva] <- attributes(v)[fva] <- attributes(sd)[fva] <- attributes(upper)[fva] <- attributes(lower)[fva] <- attributes(fX)[fva] m <- prefixfv(m, "mean", "sample mean of", "bold(mean)~") v <- prefixfv(v, "var", "estimated variance of", "bold(var)~") sd <- prefixfv(sd, "sd", "estimated standard deviation of", "bold(sd)~") CItext <- paste(c("lower", "upper"), paste0(100 * confidence, "%%"), "CI limit for") lower <- prefixfv(lower, "lo", CItext[1], "bold(lo)~") upper <- prefixfv(upper, "hi", CItext[2], "bold(hi)~") ## tack together out <- cbind(fX,m,v,sd,upper,lower) ## restrict r domain bad <- matrowall(!is.finite(as.matrix(as.data.frame(out)))) rmax <- max(getrvalues(out)[!bad]) alim <- c(0, rmax) if(!canrestrict) alim <- intersect.ranges(attr(out, "alim"), alim) attr(out, "alim") <- alim ## sensible default plot formula ybase <- fvnames(fX, ".y") xname <- fvnames(fX, ".x") tname <- intersect("theo", fvnames(fX, ".")) fvnames(out, ".y") <- yname <- paste0("mean", ybase) fvnames(out, ".s") <- snames <- 
paste0(c("lo", "hi"), ybase) fvnames(out, ".") <- c(yname, tname, snames) attr(out, "fmla") <- paste(". ~ ", xname) return(out) } sqdev.fv <- function(x,m){ eval.fv((x-m)^2, dotonly=FALSE) } varblock }) meanlistfv <- local({ getYmatrix <- function(x, yn=ynames) { as.matrix(as.data.frame(x)[,yn]) } meanlistfv <- function(z, ...) { ## compute sample mean of a list of fv objects if(!is.list(z) || !all(unlist(lapply(z, is.fv)))) stop("z should be a list of fv objects") if(!do.call(compatible, unname(z))) stop("Objects are not compatible") result <- template <- z[[1]] ## extract each object's function values as a matrix ynames <- fvnames(template, "*") matlist <- unname(lapply(z, getYmatrix, yn=ynames)) ## stack matrices into an array y <- do.call(abind, append(matlist, list(along=3))) ## take mean ymean <- apply(y, 1:2, mean, ...) result[,ynames] <- ymean return(result) } meanlistfv }) spatstat.explore/R/Tstat.R0000644000176200001440000002107314611073307015222 0ustar liggesusers# # tstat.R Estimation of T function # # $Revision: 1.12 $ $Date: 2018/07/02 15:45:48 $ # Tstat <- local({ # helper functions diffrange <- function(z) diff(range(z, na.rm=TRUE)) edgetri.Trans <- function(X, triid, trim=spatstat.options("maxedgewt")) { triid <- as.matrix(triid) ntri <- nrow(triid) if(ntri == 0) return(numeric(0)) W <- rescue.rectangle(as.owin(X)) if(W$type != "rectangle") stop("Translation correction is only implemented for rectangular windows") x <- matrix(X$x[triid], nrow=ntri) y <- matrix(X$y[triid], nrow=ntri) dx <- apply(x, 1, diffrange) dy <- apply(y, 1, diffrange) wide <- diff(W$xrange) high <- diff(W$yrange) weight <- wide * high/((wide - dx) * (high - dy)) weight <- pmin.int(trim, weight) return(weight) } # helper function implemented.for.T <- function(correction, windowtype, explicit) { rect <- (windowtype == "rectangle") if(any(correction == "best")) { # select best available correction correction <- if(rect) "translate" else "border" } else { # available selection of edge corrections depends on window if(!rect) { tra <- (correction == "translate") if(any(tra)) { whinge <- "Translation correction is only implemented for rectangular windows" if(explicit) { if(all(tra)) stop(whinge) else warning(whinge) } correction <- correction[!tra] } } } return(correction) } # .......... main function .................... 
Tstat <- function(X, ..., r=NULL, rmax=NULL, correction=c("border", "translate"), ratio=FALSE, verbose=TRUE) { verifyclass(X, "ppp") # rfixed <- !is.null(r) npts <- npoints(X) W <- Window(X) areaW <- area(W) lambda <- npts/areaW lambda2 <- (npts * (npts - 1))/(areaW^2) lambda3 <- (npts * (npts - 1) * (npts - 2))/(areaW^3) rmaxdefault <- if(!is.null(rmax)) rmax else rmax.rule("K", W, lambda) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(!correction.given) correction <- c("border", "bord.modif", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.T(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame TT <- data.frame(r=r, theo= (pi/2) * (pi - 3 * sqrt(3)/4) * r^4) desc <- c("distance argument r", "theoretical Poisson %s") TT <- fv(TT, "r", quote(T(r)), "theo", , alim, c("r","%s[pois](r)"), desc, fname="T") # save numerator and denominator? if(ratio) { denom <- lambda2 * areaW numT <- eval.fv(denom * TT) denT <- eval.fv(denom + TT * 0) attributes(numT) <- attributes(denT) <- attributes(TT) attr(numT, "desc")[2] <- "numerator for theoretical Poisson %s" attr(denT, "desc")[2] <- "denominator for theoretical Poisson %s" } # identify all close pairs rmax <- max(r) close <- closepairs(X, rmax, what="ijd", twice=FALSE, neat=FALSE) I <- close$i J <- close$j DIJ <- close$d nI <- length(I) # estimate computation time if(verbose) { nTmax <- nI * (nI-1) /2 esttime <- exp(1.25 * log(nTmax) - 21.5) message(paste("Searching", nTmax, "potential triangles;", "estimated time", codetime(esttime))) } # find triangles with their diameters tri <- trianglediameters(I, J, DIJ, nvert=npts) stopifnot(identical(colnames(tri), c("i", "j", "k", "diam"))) # reassemble so each triangle appears 3 times, once for each vertex II <- with(tri, c(i, j, k)) DD <- with(tri, rep.int(diam, 3)) if(any(correction == "none")) { # uncorrected! For demonstration purposes only! 
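      # (Each triangle diameter appears once per vertex in DD, so the
      # cumulative histogram below counts point-triangle incidences with
      # diameter <= r; dividing by lambda3 * areaW gives the raw estimate.)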
wh <- whist(DD, breaks$val) # no weights numTun <- cumsum(wh) denTun <- lambda3 * areaW # uncorrected estimate of T Tun <- numTun/denTun TT <- bind.fv(TT, data.frame(un=Tun), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") if(ratio) { # save numerator and denominator numT <- bind.fv(numT, data.frame(un=numTun), "hat(%s)[un](r)", "numerator of uncorrected estimate of %s", "un") denT <- bind.fv(denT, data.frame(un=denTun), "hat(%s)[un](r)", "denominator of uncorrected estimate of %s", "un") } } if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(X) bI <- b[II] # apply reduced sample algorithm RS <- Kount(DD, bI, b, breaks) if(any(correction == "bord.modif")) { # modified border correction denom.area <- eroded.areas(W, r) numTbm <- RS$numerator denTbm <- lambda3 * denom.area Tbm <- numTbm/denTbm TT <- bind.fv(TT, data.frame(bord.modif=Tbm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif") if(ratio) { # save numerator and denominator numT <- bind.fv(numT, data.frame(bord.modif=numTbm), "hat(%s)[bordm](r)", "numerator of modified border-corrected estimate of %s", "bord.modif") denT <- bind.fv(denT, data.frame(bord.modif=denTbm), "hat(%s)[bordm](r)", "denominator of modified border-corrected estimate of %s", "bord.modif") } } if(any(correction == "border")) { numTb <- RS$numerator denTb <- lambda2 * RS$denom.count Tb <- numTb/denTb TT <- bind.fv(TT, data.frame(border=Tb), "hat(%s)[bord](r)", "border-corrected estimate of %s", "border") if(ratio) { numT <- bind.fv(numT, data.frame(border=numTb), "hat(%s)[bord](r)", "numerator of border-corrected estimate of %s", "border") denT <- bind.fv(denT, data.frame(border=denTb), "hat(%s)[bord](r)", "denominator of border-corrected estimate of %s", "border") } } } if(any(correction == "translate")) { # translation correction # apply to triangle list edgewt <- edgetri.Trans(X, tri[, 1:3]) wh <- whist(tri$diam, breaks$val, edgewt) numTtrans <- 3 * cumsum(wh) denTtrans <- lambda3 * areaW Ttrans <- numTtrans/denTtrans h <- diameter(W)/2 Ttrans[r >= h] <- NA TT <- bind.fv(TT, data.frame(trans=Ttrans), "hat(%s)[trans](r)", "translation-corrected estimate of %s", "trans") if(ratio) { numT <- bind.fv(numT, data.frame(trans=numTtrans), "hat(%s)[trans](r)", "numerator of translation-corrected estimate of %s", "trans") denT <- bind.fv(denT, data.frame(trans=denTtrans), "hat(%s)[trans](r)", "denominator of translation-corrected estimate of %s", "trans") } } # default plot will display all edge corrections formula(TT) <- . ~ r unitname(TT) <- unitname(X) # if(ratio) { # finish up numerator & denominator formula(numT) <- formula(denT) <- . 
~ r unitname(numT) <- unitname(denT) <- unitname(TT) # tack on to result TT <- rat(TT, numT, denT, check=FALSE) } return(TT) } Tstat }) spatstat.explore/R/transect.R0000644000176200001440000000751114611073311015742 0ustar liggesusers# # transect.R # # Line transects of pixel images # # $Revision: 1.8 $ $Date: 2021/06/22 05:33:50 $ # transect.im <- local({ specify.location <- function(loc, rect) { lname <- short.deparse(substitute(loc)) if(is.numeric(loc) && length(loc) == 2) return(list(x=loc[1], y=loc[2])) if(is.list(loc)) return(xy.coords(loc)) if(!(is.character(loc) && length(loc) == 1)) stop(paste("Unrecognised format for", sQuote(lname)), call.=FALSE) xr <- rect$xrange yr <- rect$yrange switch(loc, bottomleft = list(x=xr[1], y=yr[1]), bottom = list(x=mean(xr), y=yr[1]), bottomright = list(x=xr[2], y=yr[1]), right = list(x=xr[2], y=mean(yr)), topright = list(x=xr[2], y=yr[2]), top = list(x=mean(xr), y=yr[2]), topleft = list(x=xr[1], y=yr[2]), left = list(x=xr[1], y=mean(yr)), centre=, center = list(x=mean(xr), y=mean(yr)), stop(paste("Unrecognised location", sQuote(lname), "=", dQuote(loc)), call.=FALSE) ) } transect.im <- function(X, ..., from="bottomleft", to="topright", nsample=512, click=FALSE, add=FALSE, curve=NULL) { Xname <- short.deparse(substitute(X)) Xname <- sensiblevarname(Xname, "X") stopifnot(is.im(X)) check.1.integer(nsample) if(length(curve)) { ## parametric curve ## validate specification of curve check.named.list(curve, c("f", "tlim"), namopt=c("tname", "tdescrip"), xtitle="curve") stopifnot(is.function(curve$f)) check.range(curve$tlim) ## extract info tlim <- curve$tlim tname <- curve$tname %orifnull% "t" tdescrip <- curve$tdescrip %orifnull% "curve parameter" tunits <- NULL ## create sample points along curve t <- seq(tlim[1L], tlim[2L], length.out=nsample) xy <- (curve$f)(t) if(is.null(dim(xy))) stop("curve$f() should return a matrix or data frame") if(ncol(xy) != 2L) stop("curve$f() should return a matrix or data frame with 2 columns") hasnames <- all(c("x", "y") %in% colnames(xy)) x <- xy[, if(hasnames) "x" else 1L] y <- xy[, if(hasnames) "y" else 2L] } else { ## straight line transect if(click) { ## interactive if(!add) plot(X) from <- spatstatLocator(1) points(from) to <- spatstatLocator(1) points(to) segments(from$x, from$y, to$x, to$y) } else { ## data defining a line segment R <- as.rectangle(X) from <- specify.location(from, R) to <- specify.location(to, R) } ## create sample points along transect if(identical(from,to)) stop(paste(sQuote("from"), "and", sQuote("to"), "must be distinct points"), call.=FALSE) u <- seq(0,1,length.out=nsample) x <- from$x + u * (to$x - from$x) y <- from$y + u * (to$y - from$y) leng <- sqrt( (to$x - from$x)^2 + (to$y - from$y)^2) t <- u * leng tname <- "t" tdescrip <- "distance along transect" tunits <- unitname(X) } ## look up pixel values (may be NA) v <- X[list(x=x, y=y), drop=FALSE] ## package into fv object df <- data.frame(t=t, v=v) colnames(df) <- c(tname, Xname) fv(df, argu = tname, ylab = substitute(Xname(tname), list(Xname=as.name(Xname), tname=as.name(tname))), valu=Xname, labl = c(tname, paste0("%s", paren(tname))), desc = c(tdescrip, "pixel value of %s"), unitname = tunits, fname = Xname) } transect.im }) spatstat.explore/R/relriskHeat.R0000644000176200001440000000672314700376104016405 0ustar liggesusers#' #' relriskHeat.R #' #' Relative risk/conditional probability using diffusion smoothing #' #' Copyright (C) 2018-2024 Adrian Baddeley, Tilman Davies and Suman Rakshit #' #' $Revision: 1.7 $ $Date: 2024/10/06 
02:55:11 $ #' #' relriskHeat <- function(X,...) { UseMethod("relriskHeat") } relriskHeat.ppp <- function(X,..., sigmaX=NULL, weights=NULL){ stopifnot(is.ppp(X)) stopifnot(is.multitype(X)) nX <- npoints(X) Y <- split(X) ntypes <- length(Y) if(ntypes == 1) stop("Data contains only one type of points") type <- marks(X) if(length(sigmaX)) { check.nvector(sigmaX, nX) sigmaX <- split(sigmaX, type) } else sigmaX <- rep(list(NULL), ntypes) if(length(weights)) { check.nvector(weights, nX) weights <- split(weights, type) } else weights <- rep(list(NULL), ntypes) Deach <- mapply(densityHeat, x=Y, sigmaX=sigmaX, weights=weights, MoreArgs=list(...), SIMPLIFY=FALSE) Dall <- Reduce("+", Deach) probs <- solapply(Deach, "/", e2=Dall) return(probs) } bw.relriskHeatppp <- function(X, ..., method=c("likelihood", "leastsquares"), weights=NULL, srange=NULL, ns=16, sigma=NULL, leaveoneout=TRUE, verbose=TRUE) { stopifnot(is.ppp(X)) stopifnot(is.multitype(X)) method <- match.arg(method) U <- unmark(X) Y <- split(X) Denominator <- HeatEstimates.ppp(U, ..., weights=weights, srange=srange, ns=ns, sigma=sigma, leaveoneout=leaveoneout, verbose=verbose) h <- Denominator$h hname <- Denominator$hname # extract denominator value for each sigma (row) and each data point (col) lambda.denom <- Denominator$lambda #' if(is.null(weights)) { Numerators <- lapply(Y, HeatEstimates.ppp, ..., srange=srange, ns=ns, sigma=sigma, leaveoneout=leaveoneout, verbose=verbose) } else { check.nvector(weights, npoints(X), oneok=TRUE) if(length(weights) == 1) weights <- rep(weights, npoints(X)) wsplit <- split(weights, marks(X)) Numerators <- mapply(HeatEstimates.ppp, X=Y, weights=wsplit, MoreArgs = list(..., srange=srange, ns=ns, sigma=sigma, leaveoneout=leaveoneout, verbose=verbose), SIMPLIFY=FALSE) } #' extract estimates of numerator terms lamlist <- lapply(Numerators, getElement, name="lambda") #' reassemble into original position #' (tried to do this with 'unsplit' but it's too messy) opos <- split(seq_len(npoints(X)), marks(X)) lambda.numer <- matrix(, nrow=nrow(lambda.denom), ncol=ncol(lambda.denom)) for(k in seq_along(opos)) { if(length(opos.k <- opos[[k]])) lambda.numer[ , opos.k] <- lamlist[[k]] } #' compute predicted probability of observations phat <- lambda.numer/lambda.denom #' compute cross-validation criterion switch(method, likelihood = { CV <- -rowMeans(log(phat)) cname <- "Likelihood cross-validation" }, leastsquares = { CV <- rowMeans((1 - phat)^2) cname <- "Least squares cross-validation" }) result <- bw.optim(CV, h, criterion=cname, hname=hname, unitname=unitname(X)) return(result) } spatstat.explore/R/circdensity.R0000644000176200001440000000302514611073310016432 0ustar liggesusers#' #' circdensity.R #' #' Kernel smoothing for circular data #' #' $Revision: 1.6 $ $Date: 2023/08/14 06:28:59 $ circdensity <- function(x, sigma="nrd0", ..., bw=NULL, weights=NULL, unit=c("degree", "radian")) { xname <- short.deparse(substitute(x)) missu <- missing(unit) if(missing(sigma) && !is.null(bw)) sigma <- bw unit <- match.arg(unit) unit <- validate.angles(x, unit, missu) FullCircle <- switch(unit, degree = 360, radian = 2*pi) if(is.character(sigma)) { sigma <- switch(sigma, bcv = bw.bcv, nrd = bw.nrd, nrd0 = bw.nrd0, SJ = bw.SJ, ucv = bw.ucv, get(paste0("bw.", sigma), mode="function")) } if(is.function(sigma)) { sigma <- sigma(x) if(!(is.numeric(sigma) && length(sigma) == 1L && sigma > 0)) stop("Bandwidth selector should return a single positive number") } check.1.real(sigma) #' replicate data x <- x %% FullCircle xx <- c(x - 
FullCircle, x, x + FullCircle) #' replicate weights if(!is.null(weights)) { stopifnot(length(weights) == length(x)) weights <- rep(weights, 3)/3 } #' smooth z <- do.call(density.default, resolve.defaults(list(x=xx, bw=sigma, weights=weights), list(...), list(from=0, to=FullCircle, warnWbw=FALSE))) z$y <- 3 * z$y z$data.name <- xname z$call <- match.call() return(z) } spatstat.explore/R/markcorr.R0000644000176200001440000007007014611073310015736 0ustar liggesusers## ## ## markcorr.R ## ## $Revision: 1.88 $ $Date: 2022/06/08 03:07:56 $ ## ## Estimate the mark correlation function ## and related functions ## ## ------------------------------------------------------------------------ markvario <- local({ halfsquarediff <- function(m1, m2) { ((m1-m2)^2)/2 } assigntheo <- function(x, value) { x$theo <- value; return(x) } markvario <- function(X, correction=c("isotropic", "Ripley", "translate"), r=NULL, method="density", ..., normalise=FALSE) { m <- onecolumn(marks(X)) if(!is.numeric(m)) stop("Marks are not numeric") if(missing(correction)) correction <- NULL ## compute reference value Ef weights <- pointweights(X, ..., parent=parent.frame()) Ef <- if(is.null(weights)) var(m) else weighted.var(m, weights) ## Compute estimates v <- markcorr(X, f=halfsquarediff, r=r, correction=correction, method=method, normalise=normalise, ..., internal=list(Ef=Ef)) if(is.fv(v)) v <- anylist(v) ## adjust theoretical value and fix labels theoval <- if(normalise) 1 else var(m) for(i in seq_len(length(v))) { v[[i]]$theo <- theoval v[[i]] <- rebadge.fv(v[[i]], quote(gamma(r)), "gamma") } if(length(v) == 1) v <- v[[1]] return(v) } markvario }) markconnect <- local({ indicateij <- function(m1, m2, i, j) { (m1 == i) & (m2 == j) } markconnect <- function(X, i, j, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", ..., normalise=FALSE) { stopifnot(is.ppp(X) && is.multitype(X)) if(missing(correction)) correction <- NULL marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1] if(missing(j)) j <- lev[2] ## compute reference value Ef weights <- pointweights(X, ..., parent=parent.frame()) Ef <- if(is.null(weights)) mean(marx == i) * mean(marx == j) else mean(weights * (marx == i)) * mean(weights * (marx == j)) ## compute estimates p <- markcorr(X, f=indicateij, r=r, correction=correction, method=method, ..., fargs=list(i=i, j=j), normalise=normalise, internal=list(Ef=Ef)) ## alter theoretical value and fix labels if(!normalise) { pipj <- mean(marx==i) * mean(marx==j) p$theo <- pipj } else { p$theo <- 1 } p <- rebadge.fv(p, new.ylab=substitute(p[i,j](r), list(i=paste(i),j=paste(j))), new.fname=c("p", paste0("list(", i, ",", j, ")")), new.yexp=substitute(p[list(i,j)](r), list(i=paste(i),j=paste(j)))) return(p) } markconnect }) Emark <- local({ f1 <- function(m1, m2) { m1 } Emark <- function(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", ..., normalise=FALSE) { stopifnot(is.ppp(X) && is.marked(X)) marx <- marks(X) isvec <- is.vector(marx) && is.numeric(marx) isdf <- is.data.frame(marx) && all(sapply(as.list(marx), is.numeric)) if(!(isvec || isdf)) stop("All marks of X should be numeric") if(missing(correction)) correction <- NULL E <- markcorr(X, f1, r=r, correction=correction, method=method, ..., normalise=normalise) if(isvec) { E <- rebadge.fv(E, quote(E(r)), "E") } else { E[] <- lapply(E, rebadge.fv, new.ylab=quote(E(r)), new.fname="E") } return(E) } Emark }) Vmark <- local({ f2 <- function(m1, m2) { m1^2 } Vmark <- function(X, r=NULL, correction=c("isotropic", 
"Ripley", "translate"), method="density", ..., normalise=FALSE) { if(missing(correction)) correction <- NULL E <- Emark(X, r=r, correction=correction, method=method, ..., normalise=FALSE) E2 <- markcorr(X, f2, r=E$r, correction=correction, method=method, ..., normalise=FALSE) if(normalise) { sig2 <- var(marks(X)) if(is.matrix(sig2)) sig2 <- diag(sig2) } if(is.fv(E)) { E <- list(E) E2 <- list(E2) } V <- list() for(i in seq_along(E)) { Ei <- E[[i]] E2i <- E2[[i]] Vi <- eval.fv(E2i - Ei^2) if(normalise) Vi <- eval.fv(Vi/sig2[i]) Vi <- rebadge.fv(Vi, quote(V(r)), "V") attr(Vi, "labl") <- attr(Ei, "labl") V[[i]] <- Vi } if(length(V) == 1) return(V[[1]]) V <- as.anylist(V) names(V) <- colnames(marks(X)) return(V) } Vmark }) ############## workhorses 'markcorr' and 'markcorrint' #################### markcorrint <- Kmark <- function(X, f=NULL, r=NULL, correction=c("isotropic", "Ripley", "translate"), ..., f1=NULL, normalise=TRUE, returnL=FALSE, fargs=NULL) { ## Computes the analogue of Kest(X) ## where each pair (x_i,x_j) is weighted by w(m_i,m_j) ## ## If multiplicative=TRUE then w(u,v) = f(u) f(v) ## If multiplicative=FALSE then w(u,v) = f(u, v) ## stopifnot(is.ppp(X) && is.marked(X)) is.marked(X, dfok=FALSE) W <- Window(X) ## if(identical(sys.call()[[1]], as.name('markcorrint'))) warn.once('markcorrint', "markcorrint will be deprecated in future versions of spatstat;", "use the equivalent function Kmark") ## validate test function h <- check.testfun(f, f1, X) f <- h$f f1 <- h$f1 ftype <- h$ftype multiplicative <- ftype %in% c("mul", "product") ## ## check corrections correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) isborder <- correction %in% c("border", "bord.modif") if(any(isborder) && !multiplicative) { whinge <- paste("Border correction is not valid unless", "test function is of the form f(u,v) = f1(u)*f1(v)") correction <- correction[!isborder] if(length(correction) == 0) stop(whinge) else warning(whinge) } ## estimated intensity lambda <- intensity(X) mX <- marks(X) switch(ftype, mul={ wt <- mX/lambda K <- Kinhom(X, r=r, reciplambda=wt, correction=correction, ..., renormalise=FALSE) Ef2 <- mean(mX)^2 }, equ={ fXX <- outer(mX, mX, "==") wt <- fXX/lambda^2 K <- Kinhom(X, r=r, reciplambda2=wt, correction=correction, ..., renormalise=FALSE) mtable <- table(mX) Ef2 <- sum(mtable^2)/length(mX)^2 }, product={ f1X <- do.call(f1, append(list(mX), fargs)) wt <- f1X/lambda K <- Kinhom(X, r=r, reciplambda=wt, correction=correction, ..., renormalise=FALSE) Ef2 <- mean(f1X)^2 }, general={ fXX <- do.call(outer, append(list(mX, mX, f), fargs)) wt <- fXX/lambda^2 K <- Kinhom(X, r=r, reciplambda2=wt, correction=correction, ..., renormalise=FALSE) Ef2 <- mean(fXX) }) K$theo <- K$theo * Ef2 labl <- attr(K, "labl") if(normalise) K <- eval.fv(K/Ef2) if(returnL) K <- eval.fv(sqrt(K/pi)) attr(K, "labl") <- labl if(normalise && !returnL) { ylab <- quote(K[f](r)) fnam <- c("K", "f") } else if(normalise && returnL) { ylab <- quote(L[f](r)) fnam <- c("L", "f") } else if(!normalise && !returnL) { ylab <- quote(C[f](r)) fnam <- c("C", "f") } else { ylab <- quote(sqrt(C[f](r)/pi)) fnam <- "sqrt(C[f]/pi)" } K <- 
rebadge.fv(K, ylab, fnam) return(K) } markcorr <- function(X, f = function(m1, m2) { m1 * m2}, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", ..., weights=NULL, f1=NULL, normalise=TRUE, fargs=NULL, internal=NULL) { ## mark correlation function with test function f stopifnot(is.ppp(X) && is.marked(X)) nX <- npoints(X) ## set defaults to NULL if(missing(f)) f <- NULL if(missing(correction)) correction <- NULL ## handle data frame of marks marx <- marks(X, dfok=TRUE) if(is.data.frame(marx)) { nc <- ncol(marx) result <- list() for(j in 1:nc) { Xj <- X %mark% marx[,j] result[[j]] <- markcorr(Xj, f=f, r=r, correction=correction, method=method, ..., weights=weights, f1=f1, normalise=normalise, fargs=fargs) } result <- as.anylist(result) names(result) <- colnames(marx) return(result) } ## weights if(unweighted <- is.null(weights)) { weights <- rep(1, nX) } else { weights <- pointweights(X, weights=weights, parent=parent.frame()) stopifnot(all(weights > 0)) } ## validate test function h <- check.testfun(f, f1, X) f <- h$f f1 <- h$f1 ftype <- h$ftype ## ## npts <- npoints(X) W <- X$window ## determine r values rmaxdefault <- rmax.rule("K", W, npts/area(W)) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max if(length(method) > 1) stop("Select only one method, please") if(method=="density" && !breaks$even) stop(paste("Evenly spaced r values are required if method=", sQuote("density"), sep="")) ## available selection of edge corrections depends on window correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) ## Denominator ## Ef = Ef(M,M') when M, M' are independent ## Optionally provided by other code Ef <- internal$Ef if(is.null(Ef)) { ## Apply f to every possible pair of marks, and average Ef <- switch(ftype, mul = { mean(marx * weights)^2 }, equ = { if(unweighted) { mtable <- table(marx) } else { mtable <- tapply(weights, marx, sum) mtable[is.na(mtable)] <- 0 } sum(mtable^2)/nX^2 }, product={ f1m <- do.call(f1, append(list(marx), fargs)) mean(f1m * weights)^2 }, general = { mcross <- if(is.null(fargs)) { outer(marx, marx, f) } else { do.call(outer, append(list(marx,marx,f),fargs)) } if(unweighted) { mean(mcross) } else { wcross <- outer(weights, weights, "*") mean(mcross * wcross) } }, stop("Internal error: invalid ftype")) } if(normalise) { theory <- 1 Efdenom <- Ef } else { theory <- Ef Efdenom <- 1 } if(normalise) { ## check validity of denominator if(Efdenom == 0) stop("Cannot normalise the mark correlation; the denominator is zero") else if(Efdenom < 0) warning(paste("Problem when normalising the mark correlation:", "the denominator is negative")) } ## this will be the output data frame result <- data.frame(r=r, theo= rep.int(theory,length(r))) desc <- c("distance argument r", "theoretical value (independent marks) for %s") alim <- c(0, min(rmax, rmaxdefault)) ## determine conventional name of function if(ftype %in% c("mul", "equ")) { if(normalise) { ylab <- quote(k[mm](r)) fnam <- c("k", "mm") } else { ylab <- quote(c[mm](r)) fnam <- c("c", "mm") } } else { if(normalise) { ylab <- quote(k[f](r)) fnam <- c("k", "f") } else { ylab <- 
quote(c[f](r)) fnam <- c("c", "f") } } result <- fv(result, "r", ylab, "theo", , alim, c("r","{%s[%s]^{iid}}(r)"), desc, fname=fnam) ## find close pairs of points close <- closepairs(X, rmax) dIJ <- close$d I <- close$i J <- close$j XI <- ppp(close$xi, close$yi, window=W, check=FALSE) ## apply f to marks of close pairs of points ## mI <- marx[I] mJ <- marx[J] ff <- switch(ftype, mul = mI * mJ, equ = (mI == mJ), product={ if(is.null(fargs)) { fI <- f1(mI) fJ <- f1(mJ) } else { fI <- do.call(f1, append(list(mI), fargs)) fJ <- do.call(f1, append(list(mJ), fargs)) } fI * fJ }, general={ if(is.null(fargs)) f(marx[I], marx[J]) else do.call(f, append(list(marx[I], marx[J]), fargs)) }) ## check values of f(M1, M2) if(is.logical(ff)) ff <- as.numeric(ff) else if(!is.numeric(ff)) stop("function f did not return numeric values") if(anyNA(ff)) switch(ftype, mul=, equ=stop("some marks were NA"), product=, general=stop("function f returned some NA values")) if(normalise && any(ff < 0)) switch(ftype, mul=, equ=stop("negative marks are not permitted when normalise=TRUE"), product=, general=stop("negative values of function f are not permitted when normalise=TRUE")) ## weights if(!unweighted) ff <- ff * weights[I] * weights[J] #### Compute estimates ############## if(any(correction == "none")) { ## uncorrected estimate edgewt <- rep.int(1, length(dIJ)) ## get smoothed estimate of mark covariance Mnone <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) result <- bind.fv(result, data.frame(un=Mnone), "{hat(%s)[%s]^{un}}(r)", "uncorrected estimate of %s", "un") } if(any(correction == "translate")) { ## translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) ## get smoothed estimate of mark covariance Mtrans <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) result <- bind.fv(result, data.frame(trans=Mtrans), "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { ## Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) ## get smoothed estimate of mark covariance Miso <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) result <- bind.fv(result, data.frame(iso=Miso), "{hat(%s)[%s]^{iso}}(r)", "Ripley isotropic correction estimate of %s", "iso") } ## which corrections have been computed? nama2 <- names(result) corrxns <- rev(nama2[nama2 != "r"]) ## default is to display them all formula(result) <- (. 
~ r) fvnames(result, ".") <- corrxns ## unitname(result) <- unitname(X) return(result) } ## mark cross-correlation function markcrosscorr <- function(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", ..., normalise=TRUE, Xname=NULL) { if(missing(Xname)) Xname <- short.deparse(substitute(X)) stopifnot(is.ppp(X) && is.marked(X)) npts <- npoints(X) W <- Window(X) ## available selection of edge corrections depends on window correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) ## determine r values rmaxdefault <- rmax.rule("K", W, npts/area(W)) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max ## find close pairs of points close <- closepairs(X, rmax) dIJ <- close$d I <- close$i J <- close$j XI <- ppp(close$xi, close$yi, window=W, check=FALSE) ## determine estimation method if(length(method) > 1) stop("Select only one method, please") if(method=="density" && !breaks$even) stop(paste("Evenly spaced r values are required if method=", sQuote("density"), sep="")) ## ensure marks are a data frame marx <- marks(X, dfok=TRUE) if(!is.data.frame(marx)) marx <- data.frame(marks=marx) ## convert factor marks to dummy variables while(any(isfac <- sapply(marx, is.factor))) { i <- min(which(isfac)) mari <- marx[,i] levi <- levels(mari) nami <- colnames(marx)[i] dumi <- 1 * outer(mari, levi, "==") colnames(dumi) <- paste0(nami, levi) marx <- as.data.frame(append(marx[,-i,drop=FALSE], list(dumi), after=i-1)) } nc <- ncol(marx) nama <- colnames(marx) ## loop over all pairs of columns funs <- list() for(i in 1:nc) { marxi <- marx[,i] namei <- nama[i] for(j in 1:nc) { marxj <- marx[,j] namej <- nama[j] ## Denominator ## Ef = E M M' = EM EM' ## when M, M' are independent from the respective columns Ef <- mean(marxi) * mean(marxj) if(normalise) { theory <- 1 Efdenom <- Ef ## check validity of denominator if(Efdenom == 0) stop(paste("Cannot normalise the mark correlation for", namei, "x", namej, "because the denominator is zero"), call.=FALSE) else if(Efdenom < 0) warning(paste("Problem when normalising the mark correlation for", namei, "x", namej, "- the denominator is negative"), call.=FALSE) } else { theory <- Ef Efdenom <- 1 } ## this will be the output data frame df.ij <- data.frame(r=r, theo= rep.int(theory,length(r))) desc <- c("distance argument r", "theoretical value (independent marks) for %s") alim <- c(0, min(rmax, rmaxdefault)) ## determine conventional name of function mimj <- as.name(paste0(namei,".",namej)) if(normalise) { ylab <- substitute(k[mm](r), list(mm=mimj)) fnam <- c("k", as.character(mimj)) } else { ylab <- substitute(c[mm](r), list(mm=mimj)) fnam <- c("c", as.character(mimj)) } fun.ij <- fv(df.ij, "r", ylab, "theo", , alim, c("r","{%s[%s]^{ind}}(r)"), desc, fname=fnam) mI <- marxi[I] mJ <- marxj[J] ff <- mI * mJ ## check values of f(M1, M2) if(anyNA(ff)) stop("some marks were NA", call.=FALSE) if(normalise && any(ff < 0)) stop("negative marks are not permitted when normalise=TRUE") ## Compute estimates ############## if(any(correction == "none")) { ## uncorrected estimate edgewt <- rep.int(1, length(dIJ)) ## get smoothed estimate of mark 
covariance Mnone <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) fun.ij <- bind.fv(fun.ij, data.frame(un=Mnone), "{hat(%s)[%s]^{un}}(r)", "uncorrected estimate of %s", "un") } if(any(correction == "translate")) { ## translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) ## get smoothed estimate of mark covariance Mtrans <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) fun.ij <- bind.fv(fun.ij, data.frame(trans=Mtrans), "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { ## Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) ## get smoothed estimate of mark covariance Miso <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) fun.ij <- bind.fv(fun.ij, data.frame(iso=Miso), "{hat(%s)[%s]^{iso}}(r)", "Ripley isotropic correction estimate of %s", "iso") } ## which corrections have been computed? nama2 <- names(fun.ij) corrxns <- rev(nama2[nama2 != "r"]) ## default is to display them all formula(fun.ij) <- (. ~ r) fvnames(fun.ij, ".") <- corrxns ## unitname(fun.ij) <- unitname(X) funs <- append(funs, list(fun.ij)) } } # matrix mapping array entries to list positions in 'funs' witch <- matrix(1:(nc^2), nc, nc, byrow=TRUE) header <- paste("Mark cross-correlation functions for", Xname) answer <- fasp(funs, witch, rowNames=nama, colNames=nama, title=header, dataname=Xname) return(answer) } sewsmod <- function(d, ff, wt, Ef, rvals, method="smrep", ..., nwtsteps=500) { ## Smooth Estimate of Weighted Second Moment Density ## (engine for computing mark correlations, etc) ## ------ ## Vectors containing one entry for each (close) pair of points ## d = interpoint distance ## ff = f(M1, M2) where M1, M2 are marks at the two points ## wt = edge correction weight ## ----- ## Ef = E[f(M, M')] where M, M' are independent random marks ## d <- as.vector(d) ff <- as.vector(ff) wt <- as.vector(wt) switch(method, density={ ## smooth estimate of kappa_f Kf <- unnormdensity(d, weights=ff * wt, from=min(rvals), to=max(rvals), n=length(rvals), ...)$y ## smooth estimate of kappa_1 K1 <- unnormdensity(d, weights=wt, from=min(rvals), to=max(rvals), n=length(rvals), ...)$y result <- Kf/(Ef * K1) }, sm={ ## This is slow! 
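## (Both this branch and "smrep" below form kernel estimates of the weighted
## and unweighted pair-distance densities with sm::sm.density, evaluated at
## rvals, and return their ratio normalised by Ef; "smrep" emulates the
## weights by replicating distances. method="density" above does the same
## job with unnormdensity() and is usually much faster.)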
suppressWarnings(smok <- requireNamespace("sm")) if(!smok) stop(paste("Option method=sm requires package sm,", "which is not available")) ## smooth estimate of kappa_f fw <- ff * wt est <- sm::sm.density(d, weights=fw, eval.points=rvals, display="none", nbins=0, ...)$estimate numerator <- est * sum(fw)/sum(est) ## smooth estimate of kappa_1 est0 <- sm::sm.density(d, weights=wt, eval.points=rvals, display="none", nbins=0, ...)$estimate denominator <- est0 * (sum(wt)/ sum(est0)) * Ef result <- numerator/denominator }, smrep={ suppressWarnings(smok <- requireNamespace("sm")) if(!smok) stop(paste("Option method=smrep requires package sm,", "which is not available")) hstuff <- resolve.defaults(list(...), list(hmult=1, h.weights=NA)) if(hstuff$hmult == 1 && all(is.na(hstuff$h.weights))) warning("default smoothing parameter may be inappropriate") ## use replication to effect the weights (it's faster) nw <- round(nwtsteps * wt/max(wt)) drep.w <- rep.int(d, nw) fw <- ff * wt nfw <- round(nwtsteps * fw/max(fw)) drep.fw <- rep.int(d, nfw) ## smooth estimate of kappa_f est <- sm::sm.density(drep.fw, eval.points=rvals, display="none", ...)$estimate numerator <- est * sum(fw)/sum(est) ## smooth estimate of kappa_1 est0 <- sm::sm.density(drep.w, eval.points=rvals, display="none", ...)$estimate denominator <- est0 * (sum(wt)/ sum(est0)) * Ef result <- numerator/denominator }, loess = { ## set up data frame df <- data.frame(d=d, ff=ff, wt=wt) ## fit curve to numerator using loess fitobj <- loess(ff ~ d, data=df, weights=wt, ...) ## evaluate fitted curve at desired r values Eff <- predict(fitobj, newdata=data.frame(d=rvals)) ## normalise: ## denominator is the sample mean of all ff[i,j], ## an estimate of E(ff(M1,M2)) for M1,M2 independent marks result <- Eff/Ef }, ) return(result) } ############## user interface bits ################################## check.testfun <- local({ fmul <- function(m1, m2) { m1 * m2 } fequ <- function(m1, m2) { m1 == m2 } f1id <- function(m) { m } check.testfun <- function(f=NULL, f1=NULL, X) { ## Validate f or f1 as a test function for point pattern X ## Determine function type 'ftype' ## ("mul", "equ", "product" or "general") if(is.null(f) && is.null(f1)) { ## no functions given ## default depends on kind of marks if(is.multitype(X)) { f <- fequ ftype <- "equ" } else { f1 <- f1id ftype <- "mul" } } else if(!is.null(f1)) { ## f1 given ## specifies test function of the form f(u,v) = f1(u) f1(v) if(!is.null(f)) warning("argument f ignored (overridden by f1)") stopifnot(is.function(f1)) ftype <- "product" } else { ## f given if(is.character(fname <- f)) { switch(fname, "mul" = { f1 <- f1id ftype <- "mul" }, "equ" = { f <- fequ ftype <- "equ" }, { f <- get(fname) ftype <- "general" }) } else if(is.function(f)) { ftype <- if(isTRUE(all.equal(f, fmul))) "mul" else if(isTRUE(all.equal(f, fequ))) "equ" else "general" if(ftype == "mul" && is.multitype(X)) stop(paste("Inappropriate choice of function f;", "point pattern is multitype;", "types cannot be multiplied.")) } else stop("Argument f must be a function or the name of a function") } return(list(f=f, f1=f1, ftype=ftype)) } check.testfun }) spatstat.explore/R/segtest.R0000644000176200001440000000357114611073310015576 0ustar liggesusers#' #' segtest.R #' #' Monte Carlo test of segregation for multitype patterns #' #' $Revision: 1.6 $ $Date: 2022/04/06 07:35:46 $ #' segregation.test <- function(X, ...) 
{ UseMethod("segregation.test") } segregation.test.ppp <- function(X, ..., nsim=19, permute=TRUE, verbose=TRUE, Xname) { if(missing(Xname)) Xname <- short.deparse(substitute(X)) stopifnot(is.ppp(X)) stopifnot(is.multitype(X)) check.1.integer(nsim) stopifnot(nsim > 1) verboten <- c("at", "relative", "se", "leaveoneout", "casecontrol", "case", "control") if(any(nyet <- (verboten %in% names(list(...))))) stop(paste(ngettext(sum(nyet), "Argument", "Arguments"), commasep(sQuote(verboten[nyet])), "cannot be used")) lam <- intensity(X) pbar <- lam/sum(lam) np <- npoints(X) nt <- length(pbar) pbar <- matrix(pbar, byrow=TRUE, nrow=np, ncol=nt) if(verbose) cat("Computing observed value... ") phat <- relrisk(X, at="points", ..., casecontrol=FALSE) obs <- sum((phat-pbar)^2) if(verbose) { cat(paste("Done.\nComputing", nsim, "simulated values... ")) pstate <- list() } sim <- numeric(nsim) for(i in 1:nsim) { Xsim <- rlabel(X, permute=permute) phatsim <- relrisk(Xsim, at="points", ..., casecontrol=FALSE) if(permute) pbarsim <- pbar else { lamsim <- intensity(Xsim) pbarsim <- lamsim/sum(lamsim) pbarsim <- matrix(pbarsim, byrow=TRUE, nrow=np, ncol=nt) } sim[i] <- sum((phatsim - pbarsim)^2) if(verbose) pstate <- progressreport(i, nsim, state=pstate) } if(verbose) cat("Done.\n") p.value <- (1+sum(sim >= obs))/(1+nsim) names(obs) <- "T" out <- list(statistic=obs, p.value=p.value, method="Monte Carlo test of spatial segregation of types", data.name=Xname) class(out) <- "htest" return(out) } spatstat.explore/R/Iest.R0000644000176200001440000000517614611073307015035 0ustar liggesusers# Iest.R # # I function # # $Revision: 1.16 $ $Date: 2019/10/31 03:01:26 $ # # # Iest <- local({ Iest <- function(X, ..., eps=NULL, r = NULL, breaks = NULL, correction=NULL) { X <- as.ppp(X) if(!is.multitype(X)) stop("Only applicable to multitype point patterns") marx <- marks(X, dfok=FALSE) ntypes <- length(levels(marx)) Y <- unmark(split(X)) ## relative proportions ni <- sapply(Y, npoints) fi <- ni/sum(ni) ## J function of pattern regardless of type Jdotdot <- Jest(unmark(X), correction=correction, r=r, eps=eps, breaks=breaks, ...) rvals <- Jdotdot$r ## J function of subpattern of each type i Jii <- lapply(Y, Jest, r=rvals, correction=correction, eps=eps, ...) nrvals <- lengths(lapply(Jii, getElement, name="r")) if(length(unique(nrvals)) != 1 || nrvals[1] != length(rvals)) stop("Internal error: J function objects have different lengths") ## initialise fv object alim <- attr(Jdotdot, "alim") Z <- fv(data.frame(r=rvals, theo=0), "r", substitute(I(r), NULL), "theo", . 
~ r, alim, c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="I") ## Estimates of each type namii <- unlist(lapply(Jii, names)) namdd <- names(Jdotdot) bothnames <- namii[namii %in% namdd] if("un" %in% bothnames) { Jun <- matrix(extract(Jii, "un"), nrow=ntypes, byrow=TRUE) Iun <- apply(fi * Jun, 2, sum) - Jdotdot$un Z <- bind.fv(Z, data.frame(un=Iun), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") } if("rs" %in% bothnames) { Jrs <- matrix(extract(Jii, "rs"), nrow=ntypes, byrow=TRUE) Irs <- apply(fi * Jrs, 2, sum) - Jdotdot$rs Z <- bind.fv(Z, data.frame(rs=Irs), "hat(%s)[rs](r)", "border corrected estimate of %s", "rs") } if("han" %in% bothnames) { Jhan <- matrix(extract(Jii, "han"), nrow=ntypes, byrow=TRUE) Ihan <- apply(fi * Jhan, 2, sum) - Jdotdot$han Z <- bind.fv(Z, data.frame(han=Ihan), "hat(%s)[han](r)", "Hanisch-style estimate of %s", "han") } if("km" %in% bothnames) { Jkm <- matrix(extract(Jii, "km"), nrow=ntypes, byrow=TRUE) Ikm <- apply(fi * Jkm, 2, sum) - Jdotdot$km Z <- bind.fv(Z, data.frame(km=Ikm), "hat(%s)[km](r)", "Kaplan-Meier estimate of %s", "km") } unitname(Z) <- unitname(X) attr(Z, "conserve") <- attr(Jdotdot, "conserve") return(Z) } extract <- function(Zlist, what) sapply(Zlist, "[[", i=what) Iest }) spatstat.explore/R/edgeTrans.R0000644000176200001440000001053514633203032016032 0ustar liggesusers# # edgeTrans.R # # $Revision: 1.18 $ $Date: 2024/06/08 23:59:28 $ # # Translation edge correction weights # # edge.Trans(X) compute translation correction weights # for each pair of points from point pattern X # # edge.Trans(X, Y, W) compute translation correction weights # for all pairs of points X[i] and Y[j] # (i.e. one point from X and one from Y) # in window W # # edge.Trans(X, Y, W, paired=TRUE) # compute translation correction weights # for each corresponding pair X[i], Y[i]. 
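#
# Illustrative sketch of these calling modes (a hypothetical example only;
# it assumes the spatstat packages are attached and uses runifpoint() to
# generate a point pattern):
#
if(FALSE) {
  X  <- runifpoint(100)
  v  <- edge.Trans(X)     # matrix of weights for all ordered pairs of X
  ## weights for corresponding members of two equal-length patterns,
  ## e.g. the endpoints of close pairs, as in markcorr() above:
  cl <- closepairs(X, rmax = 0.1)
  XI <- ppp(cl$xi, cl$yi, window = Window(X), check = FALSE)
  XJ <- ppp(cl$xj, cl$yj, window = Window(X), check = FALSE)
  wt <- edge.Trans(XI, XJ, paired = TRUE)
}
#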
# # To estimate the K-function see the idiom in "Kest.R" # ####################################################################### edge.Trans <- function(X, Y=X, W=Window(X), exact=FALSE, paired=FALSE, ..., trim=spatstat.options("maxedgewt"), dx=NULL, dy=NULL, give.rmax=FALSE, gW = NULL) { given.dxdy <- !is.null(dx) && !is.null(dy) if(!given.dxdy) { ## dx, dy will be computed from X, Y X <- as.ppp(X, W) W <- X$window Y <- if(!missing(Y)) as.ppp(Y, W) else X nX <- X$n nY <- Y$n if(paired) { if(nX != nY) stop("X and Y should have equal length when paired=TRUE") dx <- Y$x - X$x dy <- Y$y - X$y } else { dx <- outer(X$x, Y$x, "-") dy <- outer(X$y, Y$y, "-") } } else { ## dx, dy given if(paired) { ## dx, dy are vectors check.nvector(dx, vname="dx") check.nvector(dy, vname="dy") stopifnot(length(dx) == length(dy)) } else { ## dx, dy are matrices check.nmatrix(dx, mname="dx") check.nmatrix(dy, mname="dy") stopifnot(all(dim(dx) == dim(dy))) nX <- nrow(dx) nY <- ncol(dx) } stopifnot(is.owin(W)) } ## For irregular polygons, exact evaluation is very slow; ## so use pixel approximation, unless exact=TRUE if(W$type == "polygonal" && !exact) W <- as.mask(W) ## compute if(!paired) { dx <- as.vector(dx) dy <- as.vector(dy) } switch(W$type, rectangle={ ## Fast code for this case wide <- diff(W$xrange) high <- diff(W$yrange) weight <- wide * high / ((wide - abs(dx)) * (high - abs(dy))) }, polygonal={ ## This code is SLOW n <- length(dx) weight <- numeric(n) if(n > 0) { for(i in seq_len(n)) { Wshift <- shift(W, c(dx[i], dy[i])) weight[i] <- overlap.owin(W, Wshift) } weight <- area(W)/weight } }, mask={ ## compute set covariance of window if(is.null(gW)) gW <- setcov(W) ## evaluate set covariance at these vectors gvalues <- lookup.im(gW, dx, dy, naok=TRUE, strict=FALSE) weight <- area(W)/gvalues } ) ## clip high values if(length(weight) > 0) weight <- pmin.int(weight, trim) if(!paired) weight <- matrix(weight, nrow=nX, ncol=nY) if(give.rmax) attr(weight, "rmax") <- rmax.Trans(W, gW) return(weight) } ## maximum radius for translation correction ## = radius of largest circle centred at 0 contained in W + ^W rmax.Trans <- function(W, g=setcov(W)) { ## calculate maximum permissible 'r' value ## for validity of translation correction W <- as.owin(W) if(is.rectangle(W)) return(shortside(W)) ## find support of set covariance if(is.null(g)) g <- setcov(W) eps <- 2 * max(1, max(g)) * .Machine$double.eps gsupport <- solutionset(g > eps) gboundary <- bdry.mask(gsupport) xy <- rasterxy.mask(gboundary, drop=TRUE) rmax <- with(xy, sqrt(min(x^2 + y^2))) return(rmax) } ## maximum radius for rigid motion correction ## = radius of smallest circle centred at 0 containing W + ^W rmax.Rigid <- function(X, g=setcov(as.owin(X))) { stopifnot(is.ppp(X) || is.owin(X)) if(is.ppp(X)) return(max(pairdist(X[chull(X)]))) W <- X if(is.rectangle(W)) return(diameter(W)) if(is.null(g)) g <- setcov(W) eps <- 2 * max(1, max(g)) * .Machine$double.eps gsupport <- solutionset(g > eps) gboundary <- bdry.mask(gsupport) xy <- rasterxy.mask(gboundary, drop=TRUE) rmax <- with(xy, sqrt(max(x^2 + y^2))) return(rmax) } spatstat.explore/R/blurHeat.R0000644000176200001440000000771714700374620015703 0ustar liggesusers#' #' blurHeat.R #' #' Image blurring by diffusion #' #' Copyright (C) 2018-2024 Adrian Baddeley, Tilman Davies and Suman Rakshit #' #' Licence: GNU Public Licence >= 2 #' #' $Revision: 1.3 $ $Date: 2024/10/06 02:28:55 $ blurHeat <- function(X, ...) 
{ UseMethod("blurHeat") } blurHeat.im <- function(X, sigma, ..., connect=8, symmetric=FALSE, k=1, show=FALSE) { Y <- as.im(X) check.1.integer(k) stopifnot(k >= 1) if(!(connect %in% c(4,8))) stop("connectivity must be 4 or 8") if(is.im(sigma)) { # ensure Y and sigma are on the same grid A <- harmonise(Y=Y, sigma=sigma) Y <- A$Y sigma <- A$sigma } else if(is.function(sigma)) { sigma <- as.im(sigma, as.owin(Y)) } else check.1.real(sigma) #' initial state v <- as.matrix(Y) u <- as.vector(v) #' symmetric random walk? if(symmetric) { asprat <- with(Y, ystep/xstep) if(abs(asprat-1) > 0.01) warning(paste("Symmetric random walk on a non-square grid", paren(paste("aspect ratio", asprat))), call.=FALSE) } #' determine appropriate jump probabilities & time step pmax <- 1/(connect+1) # maximum permitted jump probability xstep <- Y$xstep ystep <- Y$ystep minstep <- min(xstep, ystep) if(symmetric) { #' all permissible transitions have the same probability 'pjump'. #' Determine Nstep, and dt=sigma^2/Nstep, such that #' Nstep >= 16 and M * pjump * minstep^2 = dt M <- if(connect == 4) 2 else 6 Nstep <- max(16, ceiling(max(sigma)^2/(M * pmax * minstep^2))) sn <- (sigma^2)/Nstep px <- py <- pxy <- sn/(M * minstep^2) } else { #' px is the probability of jumping 1 step to the right #' py is the probability of jumping 1 step up #' if connect=4, horizontal and vertical jumps are exclusive. #' if connect=8, horizontal and vertical increments are independent #' Determine Nstep, and dt = sigma^2/Nstep, such that #' Nstep >= 16 and 2 * pmax * minstep^2 = dt Nstep <- max(16, ceiling(max(sigma)^2/(2 * pmax * minstep^2))) sn <- (sigma^2)/Nstep px <- sn/(2 * xstep^2) py <- sn/(2 * ystep^2) if(max(px) > pmax) stop("Internal error: px exceeds pmax") if(max(py) > pmax) stop("Internal error: py exceeds pmax") if(connect == 8) pxy <- px * py } #' construct adjacency matrices dimv <- dim(v) my <- gridadjacencymatrix(dimv, across=FALSE, down=TRUE, diagonal=FALSE) mx <- gridadjacencymatrix(dimv, across=TRUE, down=FALSE, diagonal=FALSE) if(connect == 8) mxy <- gridadjacencymatrix(dimv, across=FALSE, down=FALSE, diagonal=TRUE) #' restrict to window if(anyNA(u)) { ok <- !is.na(u) u <- u[ok] mx <- mx[ok,ok,drop=FALSE] my <- my[ok,ok,drop=FALSE] if(connect == 8) mxy <- mxy[ok,ok,drop=FALSE] if(is.im(sigma)) { px <- px[ok] py <- py[ok] if(connect == 8) pxy <- pxy[ok] } } else ok <- TRUE #' construct iteration matrix if(connect == 4) { A <- px * mx + py * my } else { A <- px * (1 - 2 * py) * mx + py * (1 - 2 * px) * my + pxy * mxy } #' debug stopifnot(min(rowSums(A)) >= 0) stopifnot(max(rowSums(A)) <= 1) #' diag(A) <- 1 - rowSums(A) #' k-step transition probabilities if(k > 1) { Ak <- A for(j in 2:k) Ak <- Ak %*% A } else Ak <- A k <- as.integer(k) Nstep <- as.integer(Nstep) Nblock <- Nstep/k Nrump <- Nstep - Nblock * k #' run U <- u Z <- Y if(!show) { for(istep in 1:Nblock) U <- U %*% Ak } else { opa <- par(ask=FALSE) each <- max(1, round(Nblock/60)) for(istep in 1:Nblock) { U <- U %*% Ak if(istep %% each == 0) { Z[] <- as.vector(U) f <- sqrt(istep/Nstep) main <- if(is.im(sigma)) paste(signif(f, 3), "* sigma") else paste("sigma =", signif(f * sigma, 3)) plot(Z, main=main) Sys.sleep(0.4) } } par(opa) } if(Nrump > 0) for(istep in 1:Nrump) U <- U %*% A #' pack up Z[] <- as.vector(U) return(Z) } spatstat.explore/R/adaptive.density.R0000644000176200001440000000101314611073307017366 0ustar liggesusers#' #' adaptive.density.R #' #' $Revision: 1.3 $ $Date: 2022/06/29 03:05:22 $ #' adaptive.density <- function(X, ..., method=c("voronoi", 
"kernel", "nearest")) { method <- match.arg(method) result <- switch(method, voronoi = densityVoronoi(X, ...), kernel = densityAdaptiveKernel(X, ...), nearest = { if(is.lpp(X)) stop("not implemented for lpp objects") nndensity(X, ...) }) return(result) } spatstat.explore/R/evaluatecovariates.R0000644000176200001440000001356214635665500020026 0ustar liggesusers#' #' evaluatecovariates.R #' #' Evaluate covariates at specified locations #' #' $Revision: 1.9 $ $Date: 2023/05/02 06:53:46 $ #' evaluateCovariate <- function(covariate, locations, ...) { if(is.owin(locations)) { evaluateCovariateAtPixels(covariate, locations, ...) } else { evaluateCovariateAtPoints(covariate, locations, ...) } } evaluateCovariateAtPoints <- function(covariate, locations, ..., allow.column=TRUE) { AvoidNames <- c("eps", "dimyx", "rule.eps", "types") stopifnot(is.ppp(locations)) n <- npoints(locations) marx <- marks(locations) # may be null lev <- levels(marx) # may be null if(is.im(covariate)) { ## single pixel image values <- safelookup(covariate, locations) } else if(is.imlist(covariate)) { ## list of images for each type of point if(length(covariate) != length(lev)) stop(paste("Number of covariate images", paren(length(covariate)), "does not match number of possible types in data", paren(length(lev))), call.=FALSE) values <- vector(mode="list", length=n) mm <- as.integer(marx) for(k in which(as.integer(table(mm)) > 0)) { relevant <- which(mm == k) values[relevant] <- safelookup(covariate[[k]], locations[relevant]) } values <- unlist(values) } else if(is.function(covariate)) { ## function(x,y) or function(x,y,m) if(length(formals(covariate)) <= 2L || !any(c("m", "marks") %in% names(formals(covariate)))) { ## function does not use mark value values <- do.call.without(covariate, locations$x, locations$y, ..., avoid=AvoidNames) } else { ## function expects the mark values values <- do.call.without(covariate, locations$x, locations$y, marx, ..., avoid=AvoidNames) } } else if(is.list(covariate) && all(sapply(covariate, is.function))) { ## list of functions for each type of point if(length(covariate) != length(lev)) stop(paste("Number of covariate functions", paren(length(covariate)), "does not match number of possible types in data", paren(length(lev))), call.=FALSE) values <- vector(mode="list", length=n) xx <- locations$x yy <- locations$y mm <- as.integer(marx) for(k in which(as.integer(table(mm)) > 0)) { relevant <- which(mm == k) values[relevant] <- do.call.without(covariate[[k]], xx[relevant], yy[relevant], ..., avoid=AvoidNames) } values <- unlist(values) } else if(is.numeric(covariate) || is.factor(covariate)) { ## numeric/categorical value or vector if(length(covariate) == 1L) { values <- rep.int(covariate, n) } else if(allow.column && length(covariate) == n) { ## NOTE values <- covariate } else stop("Inappropriate length for covariate vector") } else if(is.list(covariate) && all(lengths(covariate) == 1L) && (all(sapply(covariate, is.numeric)) || all(sapply(covariate, is.factor)))) { ## list of single values, assumed to correspond to types if(length(covariate) != length(lev)) stop(paste("Length of list of covariate values", paren(length(covariate)), "does not match number of possible types in data", paren(length(lev))), call.=FALSE) values <- unlist(covariate[as.integer(marx)]) } else stop("Covariate should be an image, a function or a factor/numeric vector") return(values) } evaluateCovariateAtPixels <- function(covariate, locations, ..., types=NULL, eps=NULL, dimyx=NULL, rule.eps=c("adjust.eps", 
"grow.frame", "shrink.frame")) { stopifnot(is.owin(locations)) rule.eps <- match.arg(rule.eps) M <- as.mask(locations, eps=eps, dimyx=dimyx, rule.eps=rule.eps) if(is.im(covariate)) { value <- covariate[M, raster=M, drop=FALSE] } else if(is.imlist(covariate)) { value <- solapply(covariate, "[", i=M, raster=M, drop=FALSE) } else if(is.function(covariate)) { if(is.null(types) || length(formals(covariate)) <= 2L || !any(c("m", "marks") %in% names(formals(covariate)))) { ## function (x,y) or function(x,y,...) does not use mark value value <- as.im(covariate, W=M, ...) } else { ## function f(x,y,m) or function(x,y,m, ...) expects mark value value <- solapply(types, function(a) { as.im(covariate, W=M, a, ...) }) } } else if(is.list(covariate) && all(sapply(covariate, is.function))) { ## list of function(x,y) or list of function(x,y,..) value <- solapply(covariate, as.im, W=M, ...) } else if(identical(covariate, "x")) { value <- as.im(function(x,y){x}, W=M) } else if(identical(covariate, "y")) { value <- as.im(function(x,y){y}, W=M) } else if(is.numeric(covariate) || is.factor(covariate)) { value <- solapply(covariate, as.im, W=M) } else if(is.list(covariate) && all(lengths(covariate) == 1L) && all(sapply(covariate, is.numeric) | sapply(covariate, is.factor))) { ## list of single values, associated with types if(length(types) > 0 && length(covariate) != length(types)) stop(paste("Length of list of covariate values", paren(length(covariate)), "does not match number of possible types in data", paren(length(types))), call.=FALSE) value <- solapply(covariate, as.im, W=M) } else stop("Format of covariate is not understood") if(!is.null(types)) { ## Ensure result is a solist of the right length if(is.im(value)) { value <- rep(list(value), length(types)) } else if(length(value) != length(types)) warning("Mismatch between number of covariates and number of types", call.=FALSE) names(value) <- types } return(value) } spatstat.explore/R/studpermutest.R0000644000176200001440000005627714611073311017064 0ustar liggesusers#' #' studpermtest.R #' #' Original by Ute Hahn 2014 #' #' $Revision: 1.11 $ $Date: 2023/11/18 04:41:42 $ #' #' Studentized permutation test for comparison of grouped point patterns; #' functions to generate these grouped point patterns; #' wrapper for test of reweighted second order stationarity. #' #' studpermu.test #' studentized permutation test for grouped point patterns #' interpreted version, random permutations only. #' A group needs to contain at least two point patterns with at least minpoints each. # #' X the data, may be a list of lists of point patterns, or a hyperframe #' formula if X is a hyperframe, relates point patterns to factor variables that #' determine the groups. If missing, the first column of X that contains #' a factor variable is used. #' summaryfunction the function used in the test #' ... additional arguments for summaryfunction #' rinterval r-interval where summaryfunction is evaluated. If NULL, the #' interval is calculated from spatstat defaults #' (intersection for all patterns) #' nperm number of random permutations #' use.Tbar use the alternative test statistic, for summary functions with #' roughly constant variance, such as K/r or L #' minpoints the minimum number of points a pattern needs to have. Patterns #' with fewer points are not used. #' rsteps discretization steps of the r-interval #' r arguments at which to evaluate summaryfunction, overrides rinterval #' Should normally not be given, replace by rinterval instead, #' this allows r_0 > 0. 
Also, there is no plausibility check for r so far #' arguments.in.data if TRUE, individual extra arguments to summary function that #' change are taken from X (which has to be a hyperframe then). #' Assumes that the first argument of summaryfunction always is the #' point pattern. #' This is meant for internal purposes (automatisation) # #' returns an object of classes htest and studpermutest, that can be plotted. The #' plot shows the summary functions for the groups (and the means if requested) studpermu.test <- local({ studpermu.test <- function (X, formula, summaryfunction = Kest, ..., rinterval = NULL, nperm = 999, use.Tbar = FALSE, # the alternative statistic, use with K/r or L minpoints = 20, rsteps = 128, r = NULL, arguments.in.data = FALSE) { #' ---- the loooong preliminaries ------- #' ---- argument checking paranoia ---- if (arguments.in.data & !is.hyperframe(X)) stop(paste("X needs to be a hyperframe", "if arguments for summary function are to be retrieved"), call.=FALSE) stopifnot(is.function(summaryfunction)) #' there could be more... #' first prepare the data if(is.hyperframe(X)) { if(dim(X)[2] < 2) stop(paste("Hyperframe X needs to contain at least 2 columns,", "one for patterns, one indicating groups"), call.=FALSE) data <- X # renaming for later. Xclass <- unclass(X)$vclass factorcandidate <- Xclass %in% c("integer", "numeric", "character", "factor") ppcandidate <- Xclass == "ppp" names(factorcandidate) <- names(ppcandidate) <- names(Xclass) <- Xnames <- names(X) if(all(!factorcandidate) || all(!ppcandidate)) stop(paste("Hyperframe X needs to contain at least a column", "with point patterns, and one indicating groups"), call.=FALSE) if(!missing(formula)){ #' safety precautions ;-) if(!inherits(formula, "formula")) stop(paste("Argument", dQuote("formula"), "should be a formula")) if (length(formula) < 3) stop(paste("Argument", sQuote("formula"), "must have a left hand side")) rhs <- rhs.of.formula(formula) ppname <- formula[[2]] if (!is.name(ppname)) stop("Left hand side of formula should be a single name") ppname <- paste(ppname) if(!ppcandidate[ppname]) stop(paste("Left hand side of formula", "should be the name of a column of point patterns"), call.=FALSE) groupvars <- all.vars(as.expression(rhs)) if(!all(groupvars %in% Xnames) || any(!factorcandidate[groupvars])) stop(paste("Not all variables on right hand side of formula", "can be interpreted as factors"), call.=FALSE) #' make the groups to be compared group <- interaction(lapply(as.data.frame(data[ , groupvars, drop=FALSE]), factor)) #' rename the point patterns, needs the patch newnames <- Xnames newnames[Xnames == ppname] <- "pp" names(data) <- newnames data$group <- group } else { #' No formula supplied. 
#' Choose first ppp column and first factor column to make pp and groups thepp <- which.max(ppcandidate) thegroup <- which.max(factorcandidate) #' fake formula for output of test result formula <- as.formula(paste( Xnames[thepp],"~", Xnames[thegroup])) newnames <- Xnames newnames[thepp] <- "pp" newnames[thegroup] <- "group" names(data) <- newnames data$group <- as.factor(data$group) } } else { #' X is not a hyperframe, but hopefully a list of ppp if(!is.list(X)) stop("X should be a hyperframe or a list of lists of point patterns") if (!is.list(X[[1]]) || !is.ppp(X[[1]][[1]])) stop("X is a list, but not a list of lists of point patterns") nams <- names(X) if(is.null(nams)) nams <- paste("group", seq_along(X)) pp <- list() group <- NULL for (i in seq_along(X)) { pp <- c(pp, X[[i]]) group <- c(group, rep(nams[i], length(X[[i]]))) } group <- as.factor(group) data <- hyperframe(pp = pp, group = group) ppname <- "pp" } framename <- short.deparse(substitute(X)) fooname <- short.deparse(substitute(summaryfunction)) #' sorting out the patterns that contain too few points OK <- sapply(data$pp, npoints) >= minpoints if((nbad <- sum(!OK)) > 0) warning(paste(nbad, "patterns have been discarded", "because they contained fewer than", minpoints, "points"), call.=FALSE) data <- data[OK, ,drop=FALSE] pp <- data$pp #' ---- the groups, #' or what remains after discarding the poor patterns with few points ----- #' check if at least two observations in each group groupi <- as.integer(data$group) ngroups <- max(groupi) if(ngroups < 2) stop(paste("Sorry, after discarding patterns with fewer than", minpoints, "points,", if(ngroups < 1) "nothing" else "only one group", "is left over.", "\n- nothing to compare, take a break!"), call.=FALSE) lev <- 1:ngroups m <- as.vector(table(groupi)) if (any(m < 2)) stop(paste("Data groups need to contain at least two patterns;", "\nafter discarding those with fewer than", minpoints, "points, the remaining group sizes are", commasep(m)), call.=FALSE) #' check if number of possible outcomes is small #' WAS: npossible <- factorial(sum(m))/prod(factorial(m))/prod(factorial(table(m))) lognpossible <- lgamma(sum(m)+1)-sum(lgamma(m+1))-sum(lgamma(table(m)+1)) if (lognpossible < log(max(100, nperm))) warning("Don't expect exact results - group sizes are too small") #' --------- real preliminaries now ------ #' get interval for arguments if(!is.null(r)){ rinterval <- range(r) rsteps <- length(r) } else if (is.null(rinterval)) { foochar <- substr(fooname, 1, 1) if (foochar %in% c("p", "L")) foochar <- "K" if (fooname %in% c("Kscaled", "Lscaled")) foochar <- "Kscaled" rinterval <- c(0, min(with(data, rmax.rule(foochar, Window(pp), intensity(pp))))) } ranger <- diff(range(rinterval)) #' r sequence needs to start at 0 for Kest and such rr <- r %orifnull% seq(0, rinterval[2], length.out = rsteps + 1) taker <- rr >= rinterval[1] & rr <= rinterval[2] # used for testing #' now estimate the summary function, finally... #' TO DO!!!! Match function call of summary function with data columns! #' use arguments.in.data, if applicable. 
This is for inhomogeneous summary #' functions #' Force all calls to summaryfunction to use the same edge correction, #' rather than allowing correction to depend on npoints needcorx <- "correction" %in% names(formals(summaryfunction)) gavecorx <- "correction" %in% names(list(...)) corx <- if(needcorx && !gavecorx) "best" else NULL #' --------- retrieve arguments for summary function from data, hvis det er fvlist <- if(arguments.in.data) { #' use arguments in hyperframe 'data' as well as explicit arguments if(is.null(corx)) { multicall(summaryfunction, pp, data, r = rr, ...) } else { multicall(summaryfunction, pp, data, r = rr, ..., correction=corx) } } else { #' use explicit arguments only if(is.null(corx)) { with(data, summaryfunction(pp, r = rr, ...)) } else { with(data, summaryfunction(pp, r = rr, ..., correction=corx)) } } fvtemplate <- fvlist[[1]] valu <- attr(fvtemplate, "valu") argu <- attr(fvtemplate, "argu") foar <- sapply(lapply(fvlist, "[[", valu), "[", taker) #' --------- the real stuff -------------- #' function that calculates the discrepancy #' slow version combs <- combn(lev, 2) #' --------- now do the real real stuff :-) ------------- #' generate "simulated values" from random permutations. #' possible improvement for future work: #' If the number of all permutations (combis) is small, #' first generate all permutations and then #' sample from them to improve precision predigested <- list(lev=lev, foar=foar, m=m, combs=combs, rrr=rr[taker], ranger=ranger) if(use.Tbar) { Tobs <- Tbarstat(groupi, predigested) Tsim <- replicate(nperm, Tbarstat(sample(groupi), predigested)) } else { Tobs <- Tstat(groupi, predigested) Tsim <- replicate(nperm, Tstat(sample(groupi), predigested)) } names(Tobs) <- if(use.Tbar) "Tbar" else "T" pval <- (1 + sum(Tobs < Tsim))/(1 + nperm) #' ----- making a test object ----- method <- c("Studentized permutation test for grouped point patterns", if(is.hyperframe(X)) pasteFormula(formula) else NULL, choptext(ngroups, "groups:", paste(levels(data$group), collapse=", ")), choptext("summary function:", paste0(fooname, ","), "evaluated on r in", prange(rinterval)), choptext("test statistic:", if(use.Tbar) "Tbar," else "T,", nperm, "random permutations")) fooshort <- switch(fooname, pcf = "pair correlation ", Kinhom = "inhomogeneous K-", Linhom = "inhomogeneous L-", Kscaled = "locally scaled K-", Lscaled = "locally scaled L-", paste(substr(fooname, 1, 1),"-",sep="")) alternative <- c(paste("not the same ",fooshort,"function", sep="")) testerg <- list(statistic = Tobs, p.value = pval, alternative = alternative, method = method, data.name = framename) class(testerg) <- c("studpermutest", "htest") #' Add things for plotting #' prepare the fvlist, so that it only contains the estimates used, fvs <- lapply(fvlist, "[.fv", j=c(argu, valu)) #' with rinterval as alim fvs <- lapply(fvs, "attr<-", which="alim", value=rinterval) testerg$curves <- hyperframe(fvs = fvs, groups = data$group) fvtheo <- fvlist[[1]] fvnames(fvtheo, ".y") <- "theo" attr(fvtheo, "alim") <- rinterval testerg$curvtheo <- fvtheo[ , c(argu, "theo")] #' group means grmn <- lapply(lev, splitmean, ind=groupi, f=foar) testerg$groupmeans <- lapply(grmn, makefv, xvals=rr[taker], template=fvtheo) return(testerg) } splitmean <- function(l, ind, f) { apply(f[ , ind == l], 1, mean) } splitvarn <- function(l, ind, f, m) { apply(f[ , ind == l], 1, var) / m[l] } studentstat <- function(i, grmean, grvar) { (grmean[, i[1]] - grmean[, i[2]])^2 / (grvar[i[1],] + grvar[i[2], ]) } Tstat <- function (ind = groupi, 
predigested) { #' predigested should be a list with entries lev, foar, m, combs, rrr with(predigested, { grmean <- sapply(lev, splitmean, ind=ind, f=foar) grvar <- t(sapply(lev, splitvarn, ind=ind, f=foar, m=m)) y <- apply(combs, 2, studentstat, grmean=grmean, grvar=grvar) sum(apply(y, 2, trapint, x = rrr)) }) } intstudent <- function(i, rrr, grmean, meangrvar) { trapint(rrr, (grmean[, i[1]] - grmean[, i[2]])^2 / (meangrvar[i[1]] + meangrvar[i[2]])) } Tbarstat <- function (ind = groupi, predigested) { #' predigested should be a list #' with entries lev, foar, m, combs, rrr, ranger with(predigested, { grmean <- sapply(lev, splitmean, ind=ind, f=foar) grvar <- t(sapply(lev, splitvarn, ind=ind, f=foar, m=m)) meangrvar <- apply(grvar, 1, trapint, x = rrr)/ranger sum(apply(combs, 2, intstudent, rrr=rrr, grmean=grmean, meangrvar=meangrvar)) #' trapint(rr[taker], grvar[i[1],] + grvar[i[2], ])))) }) } makefv <- function(yvals, xvals, template) { fdf <- data.frame(r = xvals, y = yvals) argu <- fvnames(template, ".x") valu <- fvnames(template, ".y") names(fdf) <- c(argu,valu) fv(fdf, argu = argu, ylab = attr(template, "ylab"), valu = valu, fmla = attr(template,"fmla"), alim = attr(template, "alim")) } #' Trapezoidal rule approximation to integral #' ------- Trapezregel, mit Behandlung von NAns: #' die werden einfach ignoriert ---- trapint <- function(x, y) { nonan <- !is.na(y) nn <- sum(nonan) if(nn < 2L) return(0) Y <- y[nonan] X <- x[nonan] 0.5 * sum( (Y[-1] + Y[-nn]) * diff(X)) } #' call foo(x, further arguments) repeatedly #' further arguments are taken from hyperframe H and ... multicall <- function(foo, x, H, ...){ stopifnot(is.hyperframe(H)) if (is.hyperframe(x)) { x <- as.list(x)[[1]] } else if(!is.list(x)) stop("in multicall: x should be a hyperframe or list", call.=FALSE) #' check if same length nrows <- dim(H)[1] if (length(x) != nrows) stop(paste("in multicall: x and H need to have", "the same number of rows or list elements"), call.=FALSE) dotargs <- list(...) hnames <- names(H) argnames <- names(formals(foo))#' always assume first argument is given ppname <- argnames[1] argnames <- argnames[-1] dotmatch <- pmatch(names(dotargs), argnames) dotmatched <- dotmatch[!is.na(dotmatch)] dotuseargs <- dotargs[!is.na(dotmatch)] restargs <- if(length(dotmatched) >0) argnames[-dotmatched] else argnames hmatch <- pmatch(hnames, restargs) huse <- !is.na(hmatch) lapply(seq_len(nrows), function (i) do.call(foo, c(list(x[[i]]), as.list(H[i, huse, drop=TRUE, strip=FALSE]), dotargs))) } studpermu.test }) #' ------------------- plot studpermutest --------------------------------------- # #' plot.studpermutest #' plot the functions that were used in studperm.test #' also plot group means, if requested # #' x a studpermtest object, the test result #' fmla a plot formula as in plot.fv, should be generic, using "." for values #' ... further plot parameters #' col, lty, lwd parameter (vectors) for plotting the individual summary functions, #' according to group, if vectors #' col.theo, lty.theo, lwd.theo if not all are NULL, the "theo" curve is also plotted #' lwd.mean a multiplyer for the line width of the group means. #' if NULL, group means are not plotted, defaults to NULL #' lty.mean, col.mean selbsterklaerend #' separately generate a separate plot for each group (then no legends are plotted) #' meanonly do not plot individual summary functions #' legend if TRUE, and plots are not separate, plot a legend #' legendpos ... #' lbox if TRUE, draw box around legend. Defaults to FALSE #' add ... 
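#'
#' Illustrative sketch of a typical call sequence (a hypothetical example:
#' it assumes the spatstat packages are attached and that 'pyramidal' is a
#' hyperframe with a point pattern column 'Neurons' and a grouping factor
#' 'group', as in the spatstat example data):
#'
if(FALSE) {
  tst <- studpermu.test(pyramidal, Neurons ~ group,
                        summaryfunction = Lest, nperm = 999)
  print(tst)
  plot(tst)                    # summary functions of all patterns, by group
  plot(tst, meanonly = TRUE)   # group means only
}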
plot.studpermutest <- local({ plot.studpermutest <- function(x, fmla, ..., lty = NULL, col = NULL, lwd = NULL, lty.theo = NULL, col.theo = NULL, lwd.theo = NULL, lwd.mean = if(meanonly) 1 else NULL, lty.mean = lty, col.mean = col, separately = FALSE, meanonly = FALSE, main = if(meanonly) "group means" else NULL, xlim = NULL, ylim = NULL, ylab = NULL, legend = !add, legendpos = "topleft", lbox=FALSE, add = FALSE) { stopifnot(inherits(x, "studpermutest")) env.user <- parent.frame() curvlists <- split(x$curves, x$curves$groups) ngroups <- length(curvlists) gnames <- names(curvlists) #' check if theoretical functions shall be plottet plottheo <- !(is.null(lty.theo) & is.null(col.theo) & is.null(lwd.theo)) #' prepare plot parameters for groups if (is.null(lty)) lty <- 1:ngroups if (is.null(col)) col <- 1:ngroups if (is.null(lwd)) lwd <- par("lwd") if (is.null(col.mean)) col.mean <- col if (is.null(lty.mean)) lty.mean <- lty lty <- rep(lty, length.out = ngroups) col <- rep(col, length.out = ngroups) lwd <- rep(lwd, length.out = ngroups) col.mean <- rep(col.mean, length.out = ngroups) lty.mean <- rep(lty.mean, length.out = ngroups) if (plottheo){ if (is.null(lty.theo)) lty.theo <- ngroups + 1#par("lty") if (is.null(col.theo)) col.theo <- ngroups + 1 #par("col") if (is.null(lwd.theo)) lwd.theo <- par("lwd") } #' transporting the formula in ... unfortunately does not work #' for the axis labels, because the fvs contain only one variable. #' Have to knit them self if (is.null(ylab)) { if (!missing(fmla)) { #' puha. det bliver noget lappevaerk. fmla <- as.formula(fmla, env=env.user) map <- fvlabelmap(x$curvtheo) lhs <- lhs.of.formula(as.formula(fmla)) ylab <- eval(substitute(substitute(le, mp), list(le = lhs, mp = map))) } else ylab <- attr(x$curvtheo, "yexp") } if (missing(fmla)) fmla <- attr(x$curvtheo, "fmla") if(!is.null(lwd.mean)) lwd.Mean <- lwd.mean*lwd if(separately) { for (i in seq_along(gnames)) { if(!meanonly) plot.fvlist(curvlists[[i]]$fvs, fmla, ..., col = col[i], lwd = lwd[i], lty= lty[i], xlim = xlim, ylim = ylim, ylab = ylab, main = gnames[i]) if (!is.null(lwd.mean)) plot(x$groupmeans[[i]], fmla, ..., col = col.mean[i], lwd = lwd.Mean[i], lty = lty.mean[i], main = gnames[i], add = !meanonly, ylim = ylim) if (plottheo) plot(x$curvtheo, fmla, ..., add = TRUE, col = col.theo, lwd = lwd.theo, lty = lty.theo) } } else { #' ---- TODO SIMPLIFY! 
they should all have the same x-range, #' just check y-range ---- lims <- if (meanonly) { plot.fvlist(x$groupmeans, fmla,..., limitsonly = TRUE) } else { as.data.frame(apply(sapply(curvlists, function(C) plot.fvlist(C$fvs, fmla,..., limitsonly = TRUE)), 1, range)) } if(is.null(xlim)) xlim <- lims$xlim if(is.null(ylim)) ylim <- lims$ylim iadd <- add for (i in seq_along(gnames)) { if(!meanonly) plot.fvlist(curvlists[[i]]$fvs, fmla, ..., col = col[i], lwd = lwd[i], lty= lty[i], xlim = xlim, ylim = ylim, ylab= ylab, main = main, add = iadd) iadd <- iadd | !meanonly if (!is.null(lwd.mean)) plot(x$groupmeans[[i]], fmla, ..., col = col.mean[i], lwd = lwd.Mean[i], lty = lty.mean[i], add = iadd, xlim = xlim, ylim = ylim, ylab= ylab, main=main) if (plottheo) plot(x$curvtheo, fmla, ..., add = TRUE, col = col.theo, lwd = lwd.theo, lty = lty.theo, xlim = xlim, ylim = ylim, ylab= ylab, main=main) iadd <- TRUE } if(legend) { if(meanonly) { lwd <- lwd.Mean col <- col.mean lty <- lty.mean } if(plottheo){ gnames <- c(gnames, "Poisson mean") col <- c(col, col.theo) lty <- c(lty, lty.theo) lwd <- c(lwd, lwd.theo) } legend(legendpos, gnames, col = col, lty = lty, lwd = lwd, bty=ifelse(lbox, "o", "n")) } } return(invisible(NULL)) } #' ------------------ Helper function---------------- #' flist: list of fv, with plot method plot.fvlist <- function(x, fmla, ..., xlim=NULL, ylim=NULL, add = FALSE, limitsonly = FALSE, main=NULL){ #' no safety precautions if (missing(fmla)) fmla <- attr(x[[1]], "fmla") if (!add | limitsonly) { lims <- sapply(x, plot, fmla, ..., limitsonly = TRUE) if(is.null(xlim)) xlim = range(unlist(lims[1,])) if(is.null(ylim)) ylim = range(unlist(lims[2,])) lims=list(xlim=xlim, ylim=ylim) if(limitsonly) return(lims) plot(x[[1]], fmla, ..., xlim = xlim, ylim = ylim, main = main) } else plot(x[[1]], fmla,..., add=T) for (foo in x[-1]) plot(foo, fmla, ..., add=T) } plot.studpermutest }) spatstat.explore/R/quadrattest.R0000644000176200001440000004305714611073310016464 0ustar liggesusers# # quadrattest.R # # $Revision: 1.70 $ $Date: 2023/07/17 07:38:30 $ # quadrat.test <- function(X, ...) { UseMethod("quadrat.test") } quadrat.test.ppp <- function(X, nx=5, ny=nx, alternative = c("two.sided", "regular", "clustered"), method = c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, lambda=NULL, df.est=NULL, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) { Xname <- short.deparse(substitute(X)) method <- match.arg(method) alternative <- match.arg(alternative) do.call(quadrat.testEngine, resolve.defaults(list(quote(X), nx=nx, ny=ny, alternative=alternative, method=method, conditional=conditional, CR=CR, fit=lambda, df.est=df.est, xbreaks=xbreaks, ybreaks=ybreaks, tess=tess, nsim=nsim), list(...), list(Xname=Xname, fitname="CSR"))) } quadrat.test.splitppp <- function(X, ..., df=NULL, df.est=NULL, Xname=NULL) { if(is.null(Xname)) Xname <- short.deparse(substitute(X)) pool.quadrattest(lapply(X, quadrat.test.ppp, ...), df=df, df.est=df.est, Xname=Xname) } ## Code for quadrat.test.ppm and quadrat.test.slrm is moved to spatstat.model quadrat.test.quadratcount <- function(X, alternative = c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, lambda=NULL, df.est=NULL, ..., nsim=1999) { trap.extra.arguments(...) 
method <- match.arg(method) alternative <- match.arg(alternative) quadrat.testEngine(Xcount=X, alternative=alternative, fit=lambda, df.est=df.est, method=method, conditional=conditional, CR=CR, nsim=nsim) } quadrat.testEngine <- function(X, nx, ny, alternative = c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, ..., nsim=1999, Xcount=NULL, xbreaks=NULL, ybreaks=NULL, tess=NULL, fit=NULL, df.est=NULL, Xname=NULL, fitname=NULL) { trap.extra.arguments(...) method <- match.arg(method) alternative <- match.arg(alternative) if(method == "MonteCarlo") { check.1.real(nsim) explain.ifnot(nsim > 0) } if(!is.null(df.est)) check.1.integer(df.est) if(is.null(Xcount)) Xcount <- quadratcount(X, nx=nx, ny=ny, xbreaks=xbreaks, ybreaks=ybreaks, tess=tess) tess <- attr(Xcount, "tess") ## determine expected values under model normalised <- FALSE df.est.implied <- 0 if(is.null(fit)) { nullname <- "CSR" if(tess$type == "rect") areas <- outer(diff(tess$xgrid), diff(tess$ygrid), "*") else areas <- unlist(lapply(tiles(tess), area)) fitmeans <- sum(Xcount) * areas/sum(areas) normalised <- TRUE df.est.implied <- 1 } else if(is.im(fit) || inherits(fit, "funxy")) { nullname <- "Poisson process with given intensity" lambda <- as.im(fit, W=Window(tess)) means <- integral(lambda, tess) fitmeans <- sum(Xcount) * means/sum(means) normalised <- TRUE df.est.implied <- 1 } else if(inherits(fit, "ppm")) { if(!requireNamespace("spatstat.model")) stop("To predict a fitted model, the package spatstat.model is required", call.=FALSE) if(!is.poisson(fit)) stop("Quadrat test only supported for Poisson point process models") if(is.marked(fit)) stop("Sorry, not yet implemented for marked point process models") nullname <- paste("fitted Poisson model", sQuote(fitname)) lambda <- predict(fit, locations=Window(tess), type="intensity") means <- integral(lambda, tess) fitmeans <- sum(Xcount) * means/sum(means) normalised <- FALSE df.est.implied <- length(coef(fit)) } else if(inherits(fit, "slrm")) { if(!requireNamespace("spatstat.model")) stop("To predict a fitted model, the package spatstat.model is required", call.=FALSE) nullname <- paste("fitted spatial logistic regression", sQuote(fitname)) probs <- predict(fit, type="probabilities") ## usual case xy <- raster.xy(probs, drop=TRUE) masses <- as.numeric(probs[]) V <- tileindex(xy, Z=tess) fitmeans <- tapplysum(masses, list(tile=V)) normalised <- FALSE df.est.implied <- length(coef(fit)) } else stop("fit should be a point process model (ppm or slrm) or pixel image") df <- switch(method, Chisq = length(fitmeans) - df.est %orifnull% df.est.implied, MonteCarlo = NULL) ## assemble data for test OBS <- as.vector(t(as.table(Xcount))) EXP <- as.vector(fitmeans) if(!normalised) EXP <- EXP * sum(OBS)/sum(EXP) ## label it switch(method, Chisq = { if(CR == 1) { testname <- "Chi-squared test" reference <- statname <- NULL } else { testname <- CressieReadTestName(CR) statname <- paste("Test statistic:", CressieReadName(CR)) reference <- "(p-value obtained from chi-squared distribution)" } }, MonteCarlo = { testname <- paste(if(conditional) "Conditional" else "Unconditional", "Monte Carlo test") statname <- paste("Test statistic:", CressieReadName(CR)) reference <- NULL }) testblurb <- paste(testname, "of", nullname, "using quadrat counts") testblurb <- c(testblurb, statname, reference) #' perform test result <- X2testEngine(OBS, EXP, method=method, df=df, nsim=nsim, conditional=conditional, CR=CR, alternative=alternative, testname=testblurb, 
dataname=Xname) class(result) <- c("quadrattest", class(result)) attr(result, "quadratcount") <- Xcount return(result) } CressieReadStatistic <- function(OBS, EXP, lambda=1, normalise=FALSE, named=TRUE) { if(normalise) EXP <- sum(OBS) * EXP/sum(EXP) y <- if(lambda == 1) sum((OBS - EXP)^2/EXP) else if(lambda == 0) 2 * sum(ifelse(OBS > 0, OBS * log(OBS/EXP), 0)) else if(lambda == -1) 2 * sum(EXP * log(EXP/OBS)) else (2/(lambda * (lambda + 1))) * sum(ifelse(OBS > 0, OBS * ((OBS/EXP)^lambda - 1), 0)) names(y) <- if(named) CressieReadSymbol(lambda) else NULL return(y) } CressieReadSymbol <- function(lambda) { if(lambda == 1) "X2" else if(lambda == 0) "G2" else if(lambda == -1/2) "T2" else if(lambda == -1) "GM2" else if(lambda == -2) "NM2" else "CR" } CressieReadName <- function(lambda) { if(lambda == 1) "Pearson X2 statistic" else if(lambda == 0) "likelihood ratio test statistic G2" else if(lambda == -1/2) "Freeman-Tukey statistic T2" else if(lambda == -1) "modified likelihood ratio test statistic GM2" else if(lambda == -2) "Neyman modified X2 statistic NM2" else paste("Cressie-Read statistic", paren(paste("lambda =", if(abs(lambda - 2/3) < 1e-7) "2/3" else lambda) ) ) } CressieReadTestName <- function(lambda) { if(lambda == 1) "Chi-squared test" else if(lambda == 0) "Likelihood ratio test" else if(lambda == -1/2) "Freeman-Tukey test" else if(lambda == -1) "Modified likelihood ratio test" else if(lambda == -2) "Neyman modified chi-squared test" else paste("Cressie-Read power divergence test", paren(paste("lambda =", if(abs(lambda - 2/3) < 1e-7) "2/3" else lambda) ) ) } X2testEngine <- function(OBS, EXP, ..., method=c("Chisq", "MonteCarlo"), CR=1, df=NULL, nsim=NULL, conditional, alternative, testname, dataname) { method <- match.arg(method) if(method == "Chisq" && any(EXP < 5)) warning(paste("Some expected counts are small;", "chi^2 approximation may be inaccurate"), call.=FALSE) X2 <- CressieReadStatistic(OBS, EXP, CR) # conduct test switch(method, Chisq = { if(!is.null(df)) names(df) <- "df" pup <- pchisq(X2, df, lower.tail=FALSE) plo <- pchisq(X2, df, lower.tail=TRUE) PVAL <- switch(alternative, regular = plo, clustered = pup, two.sided = 2 * min(pup, plo)) }, MonteCarlo = { nsim <- as.integer(nsim) if(conditional) { npts <- sum(OBS) p <- EXP/sum(EXP) SIM <- rmultinom(n=nsim,size=npts,prob=p) } else { ne <- length(EXP) SIM <- matrix(rpois(nsim*ne,EXP),nrow=ne) } simstats <- apply(SIM, 2, CressieReadStatistic, EXP=EXP, lambda=CR, normalise=!conditional) if(anyDuplicated(simstats)) simstats <- jitter(simstats) phi <- (1 + sum(simstats >= X2))/(1+nsim) plo <- (1 + sum(simstats <= X2))/(1+nsim) PVAL <- switch(alternative, clustered = phi, regular = plo, two.sided = min(1, 2 * min(phi,plo))) }) result <- structure(list(statistic = X2, parameter = df, p.value = PVAL, method = testname, data.name = dataname, alternative = alternative, observed = OBS, expected = EXP, residuals = (OBS - EXP)/sqrt(EXP), CR = CR, method.key = method), class = "htest") return(result) } print.quadrattest <- function(x, ...) 
{ NextMethod("print") single <- is.atomicQtest(x) if(!single) splat("Pooled test") if(waxlyrical('gory')) { if(single) { cat("Quadrats: ") } else { splat("Quadrats of component tests:") } x <- as.tess(x) x <- if(is.tess(x)) unmark(x) else solapply(x, unmark) do.call(print, resolve.defaults(list(x=quote(x)), list(...), list(brief=TRUE))) } return(invisible(NULL)) } plot.quadrattest <- local({ plot.quadrattest <- function(x, ..., textargs=list()) { xname <- short.deparse(substitute(x)) if(!is.atomicQtest(x)) { # pooled test - plot the original tests tests <- extractAtomicQtests(x) dont.complain.about(tests) do.call(plot, resolve.defaults(list(x=quote(tests)), list(...), list(main=xname))) return(invisible(NULL)) } Xcount <- attr(x, "quadratcount") # plot tessellation tess <- as.tess(Xcount) do.call(plot.tess, resolve.defaults(list(quote(tess)), list(...), list(main=xname))) # compute locations for text til <- tiles(tess) ok <- sapply(til, haspositivearea) incircles <- lapply(til[ok], incircle) x0 <- sapply(incircles, getElement, name="x") y0 <- sapply(incircles, getElement, name="y") ra <- sapply(incircles, getElement, name="r") # plot observed counts cos30 <- sqrt(2)/2 sin30 <- 1/2 f <- 0.4 dotext(-f * cos30, f * sin30, as.vector(t(as.table(Xcount)))[ok], x0, y0, ra, textargs, adj=c(1,0), ...) # plot expected counts dotext(f * cos30, f * sin30, round(x$expected,1)[ok], x0, y0, ra, textargs, adj=c(0,0), ...) # plot Pearson residuals dotext(0, -f, signif(x$residuals,2)[ok], x0, y0, ra, textargs, ...) return(invisible(NULL)) } dotext <- function(dx, dy, values, x0, y0, ra, textargs, ...) { xx <- x0 + dx * ra yy <- y0 + dy * ra do.call.matched(text.default, resolve.defaults(list(x=quote(xx), y = quote(yy)), list(labels=paste(as.vector(values))), textargs, list(...)), funargs=graphicsPars("text")) } haspositivearea <- function(x) { !is.null(x) && area(x) > 0 } plot.quadrattest }) ######## pooling multiple quadrat tests into a quadrat test pool.quadrattest <- function(..., df=NULL, df.est=NULL, nsim=1999, Xname=NULL, CR=NULL) { argh <- list(...) 
  if(!is.null(df) && !is.null(df.est))
    stop("Arguments df and df.est are incompatible")

  if(all(unlist(lapply(argh, inherits, what="quadrattest")))) {
    # Each argument is a quadrattest object
    tests <- argh
  } else if(length(argh) == 1 &&
            is.list(arg1 <- argh[[1]]) &&
            all(unlist(lapply(arg1, inherits, "quadrattest")))) {
    # There is just one argument, which is a list of quadrattests
    tests <- arg1
  } else stop("Each entry in the list must be a quadrat test")

  # data from all cells in all tests
  OBS <- unlist(lapply(tests, getElement, name="observed"))
  EXP <- unlist(lapply(tests, getElement, name="expected"))
  # RES <- unlist(lapply(tests, getElement, name="residuals"))
  # STA <- unlist(lapply(tests, getElement, name="statistic"))

  # information about each test
  Mkey <- unlist(lapply(tests, getElement, name="method.key"))
  Testname <- lapply(tests, getElement, name="method")
  Alternative <- unlist(lapply(tests, getElement, name="alternative"))
  Conditional <- unlist(lapply(tests, getElement, name="conditional"))

  # name of data
  if(is.null(Xname)) {
    Nam <- unlist(lapply(tests, getElement, name="data.name"))
    Xname <- commasep(sQuote(Nam))
  }

  # name of test
  testname <- unique(Testname)
  method.key <- unique(Mkey)
  if(length(testname) > 1)
    stop(paste("Cannot combine different types of tests:",
               commasep(sQuote(method.key))))
  testname <- testname[[1]]

  # alternative hypothesis
  alternative <- unique(Alternative)
  if(length(alternative) > 1)
    stop(paste("Cannot combine tests with different alternatives:",
               commasep(sQuote(alternative))))

  # conditional tests
  conditional <- any(Conditional)
  if(conditional)
    stop("Sorry, not implemented for conditional tests")

  # Cressie-Read exponent
  if(is.null(CR)) {
    CR <- unlist(lapply(tests, getElement, name="CR"))
    CR <- unique(CR)
    if(length(CR) > 1) {
      warning("Tests used different values of CR; assuming CR=1")
      CR <- 1
    }
  }

  if(method.key == "Chisq") {
    # determine degrees of freedom
    if(is.null(df)) {
      if(!is.null(df.est)) {
        # total number of observations minus number of fitted parameters
        df <- length(OBS) - df.est
      } else {
        # total degrees of freedom of tests
        # implicitly assumes independence of tests
        PAR <- unlist(lapply(tests, getElement, name="parameter"))
        df <- sum(PAR)
      }
    }
    # validate df
    if(df < 1)
      stop(paste("Degrees of freedom = ", df))
    names(df) <- "df"
  }

  # perform test
  result <- X2testEngine(OBS, EXP,
                         method=method.key, df=df, nsim=nsim,
                         conditional=conditional, CR=CR,
                         alternative=alternative,
                         testname=testname, dataname=Xname)
  # add info
  class(result) <- c("quadrattest", class(result))
  attr(result, "tests") <- as.solist(tests)
  # there is no quadratcount attribute
  return(result)
}

is.atomicQtest <- function(x) {
  inherits(x, "quadrattest") && is.null(attr(x, "tests"))
}

extractAtomicQtests <- function(x) {
  if(is.atomicQtest(x)) return(list(x))
  stopifnot(inherits(x, "quadrattest"))
  tests <- attr(x, "tests")
  y <- lapply(tests, extractAtomicQtests)
  z <- do.call(c, y)
  return(as.solist(z))
}

as.tess.quadrattest <- function(X) {
  if(is.atomicQtest(X)) {
    Y <- attr(X, "quadratcount")
    return(as.tess(Y))
  }
  tests <- extractAtomicQtests(X)
  return(as.solist(lapply(tests, as.tess.quadrattest)))
}

as.owin.quadrattest <- function(W, ..., fatal=TRUE) {
  if(is.atomicQtest(W))
    return(as.owin(as.tess(W), ..., fatal=fatal))
  gezeur <- paste("Cannot convert quadrat test result to a window;",
                  "it contains data for several windows")
  if(fatal) stop(gezeur) else warning(gezeur)
  return(NULL)
}

domain.quadrattest <- Window.quadrattest <- function(X, ...) { as.owin(X) }

## The shift method is undocumented.
## It is only needed in plot.listof etc shift.quadrattest <- function(X, ...) { if(is.atomicQtest(X)) { attr(X, "quadratcount") <- qc <- shift(attr(X, "quadratcount"), ...) attr(X, "lastshift") <- getlastshift(qc) } else { tests <- extractAtomicQtests(X) attr(X, "tests") <- te <- lapply(tests, shift, ...) attr(X, "lastshift") <- getlastshift(te[[1]]) } return(X) } spatstat.explore/R/densityVoronoi.R0000644000176200001440000000575014611073310017154 0ustar liggesusers#' #' densityVoronoi.R #' #' $Revision: 1.22 $ $Date: 2022/03/11 03:21:01 $ #' densityVoronoi <- function(X, ...) { UseMethod("densityVoronoi") } densityVoronoi.ppp <- function(X, f=1, ..., counting=FALSE, fixed=FALSE, nrep=1, verbose=TRUE) { stopifnot(is.ppp(X)) nX <- npoints(X) check.1.real(f) if(badprobability(f)) stop("f should be a probability between 0 and 1") check.1.integer(nrep) stopifnot(nrep >= 1) dupes <- duplicated(X, rule="deldir") anydupes <- any(dupes) ## WAS: duped <- anyDuplicated(X) ## ntess <- floor(f * nX) if(ntess == 0) { ## naive estimate of intensity if(f > 0 && verbose) splat("Tiny threshold: returning uniform intensity estimate") W <- X$window lam <- nX/area(W) return(as.im(lam, W, ...)) } if(ntess == nX) { ## Voronoi/Dirichlet estimate if(!anydupes) { tes <- dirichlet(X) tesim <- nnmap(X, what="which", ...) num <- 1 } else { UX <- X[!dupes] tes <- dirichlet(UX) tesim <- nnmap(UX, what="which", ...) idx <- nncross(X, UX, what="which") num <- as.integer(table(factor(idx, levels=seq_len(npoints(UX))))) } lam <- num/tile.areas(tes) out <- eval.im(lam[tesim]) return(out) } if(nrep > 1) { ## estimate is the average of nrep randomised estimates total <- 0 if(verbose) cat(paste("Computing", nrep, "intensity estimates...")) state <- list() for(i in seq_len(nrep)) { estimate <- densityVoronoi.ppp(X, f, ..., counting=counting, fixed=fixed, nrep=1) total <- eval.im(total + estimate) if(verbose) state <- progressreport(i, nrep, state=state) } if(verbose) cat("Done.\n") average <- eval.im(total/nrep) return(average) } ## ------ This is the main calculation ------- ## perform thinning if(!fixed) { itess <- thinjump(nX, f) tessfrac <- f } else { itess <- sample(seq_len(nX), ntess, replace=FALSE) tessfrac <- as.numeric(ntess)/nX } Xtess <- X[itess] if(anydupes) { dupes2 <- duplicated(Xtess, rule="deldir") if(any(dupes2)) { Xtess <- Xtess[!dupes2] tessfrac <- mean(!dupes2) * tessfrac } } ## handle trivial cases nXT <- npoints(Xtess) if(nXT <= 1) { W <- Window(X) if(nXT == 0) return(as.im(0, W, ...)) # dirichlet(Xtess) undefined if(nXT == 1) return(as.im(1/area(W), W, ...)) # efficiency } ## make tessellation tes <- dirichlet(Xtess) ## estimate intensity in each tile if(!counting) { tilemass <- 1 expansion <- 1/tessfrac } else { Xcount <- X[-itess] tilemap <- tileindex(Xcount$x, Xcount$y, tes) tilemass <- as.numeric(table(tilemap)) expansion <- 1/(1-tessfrac) } lam <- expansion * tilemass/tile.areas(tes) ## estimate of intensity at each location tesim <- nnmap(Xtess, what="which", ...) 
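  ## 'tesim' maps each pixel to the index of the nearest retained point,
  ## i.e. to its Dirichlet tile; indexing 'lam' by this image assigns
  ## each tile its estimated intensity value.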
out <- eval.im(lam[tesim]) return(out) } spatstat.explore/R/Kmulti.R0000644000176200001440000002744514611073307015401 0ustar liggesusers# # Kmulti.S # # Compute estimates of cross-type K functions # for multitype point patterns # # $Revision: 5.61 $ $Date: 2023/02/28 02:06:33 $ # # # -------- functions ---------------------------------------- # Kcross() cross-type K function K_{ij} # between types i and j # # Kdot() K_{i\bullet} # between type i and all points regardless of type # # Kmulti() (generic) # # # -------- standard arguments ------------------------------ # X point pattern (of class 'ppp') # including 'marks' vector # r distance values at which to compute K # # -------- standard output ------------------------------ # A data frame with columns named # # r: same as input # # trans: K function estimated by translation correction # # iso: K function estimated by Ripley isotropic correction # # theo: K function for Poisson ( = pi * r ^2 ) # # border: K function estimated by border method # using standard formula (denominator = count of points) # # bord.modif: K function estimated by border method # using modified formula # (denominator = area of eroded window # # ------------------------------------------------------------------------ "Lcross" <- function(X, i, j, ..., from, to, correction) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- if(!missing(from)) from else levels(marks(X))[1] if(missing(j)) j <- if(!missing(to)) to else levels(marks(X))[2] if(missing(correction)) correction <- NULL K <- Kcross(X, i, j, ..., correction=correction) L <- eval.fv(sqrt(K/pi)) # relabel the fv object iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) L <- rebadge.fv(L, substitute(L[i,j](r), list(i=iname,j=jname)), c("L", paste0("list(", iname, ",", jname, ")")), new.yexp=substitute(L[list(i,j)](r), list(i=iname,j=jname))) attr(L, "labl") <- attr(K, "labl") return(L) } "Ldot" <- function(X, i, ..., from, correction) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- if(!missing(from)) from else levels(marks(X))[1] if(missing(correction)) correction <- NULL K <- Kdot(X, i, ..., correction=correction) L <- eval.fv(sqrt(K/pi)) # relabel the fv object iname <- make.parseable(paste(i)) L <- rebadge.fv(L, substitute(L[i ~ dot](r), list(i=iname)), c("L", paste(iname, "~ symbol(\"\\267\")")), new.yexp=substitute(L[i ~ symbol("\267")](r), list(i=iname))) attr(L, "labl") <- attr(K, "labl") return(L) } "Kcross" <- function(X, i, j, r=NULL, breaks=NULL, correction =c("border", "isotropic", "Ripley", "translate") , ..., ratio=FALSE, from, to) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- if(!missing(from)) from else levels(marx)[1] if(missing(j)) j <- if(!missing(to)) to else levels(marx)[2] I <- (marx == i) if(!any(I)) stop(paste("No points have mark i =", i)) if(i == j) { ## use Kest XI <- X[I] dont.complain.about(XI) result <- do.call(Kest, resolve.defaults(list(X=quote(XI), r=quote(r), breaks=quote(breaks), correction=correction, ratio=ratio), list(rmax=NULL), ## forbidden list(...))) } else { J <- (marx == j) if(!any(J)) stop(paste("No points have mark j =", j)) result <- Kmulti(X, I, J, r=r, breaks=breaks, correction=correction, ratio=ratio, ...) 
} result <- rebadge.as.crossfun(result, "K", NULL, i, j) return(result) } "Kdot" <- function(X, i, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate") , ..., ratio=FALSE, from) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- if(!missing(from)) from else levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. all points if(!any(I)) stop(paste("No points have mark i =", i)) result <- Kmulti(X, I, J, r=r, breaks=breaks, correction=correction, ..., ratio=ratio) result <- rebadge.as.dotfun(result, "K", NULL, i) return(result) } "Kmulti"<- function(X, I, J, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate") , ..., rmax=NULL, ratio=FALSE) { verifyclass(X, "ppp") npts <- npoints(X) W <- Window(X) areaW <- area(W) dotargs <- list(...) domainI <- resolve.1.default("domainI", dotargs) %orifnull% W domainJ <- resolve.1.default("domainJ", dotargs) %orifnull% W areaI <- area(domainI) areaJ <- area(domainJ) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", periodic="periodic", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) I <- ppsubset(X, I, "I") J <- ppsubset(X, J, "J") if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") if(!any(I)) stop("no points belong to subset I") if(!any(J)) stop("no points belong to subset J") nI <- sum(I) nJ <- sum(J) lambdaI <- nI/areaI lambdaJ <- nJ/areaJ npairs <- nI * nJ # r values rmaxdefault <- rmax %orifnull% rmax.rule("K", W, lambdaJ) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame # It will be given more columns later K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") K <- ratfv(K, NULL, npairs, "r", quote(K[IJ](r)), "theo", , alim, c("r","{%s[%s]^{pois}}(r)"), desc, fname=c("K", "list(I,J)"), yexp=quote(K[list(I,J)](r)), ratio=ratio) ## Extract relevant points XI <- X[I] XJ <- X[J] ## Map XI and XJ to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] ## Find close pairs of points if(any(correction != "periodic")) { ## Find close pairs of points in Euclidean distance close <- crosspairs(XI, XJ, max(r), what="ijd", iX=imap, iY=jmap) ## extract information for these pairs (relative to orderings of XI, XJ) dcloseIJ <- close$d icloseI <- close$i jcloseJ <- close$j } ## ........................................................... ## Compute estimates by each of the selected edge corrections. ## ........................................................... if(any(correction == "none")) { # uncorrected! 
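    ## Raw estimate: cumulative histogram of (I,J) pair distances,
    ## divided by lambdaI * lambdaJ * area of the domain of I
    ## (by default the whole window).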
wh <- whist(dcloseIJ, breaks$val) # no weights Kun <- cumsum(wh)/(lambdaI * lambdaJ * areaI) K <- bind.ratfv(K, numerator = NULL, quotient = data.frame(un=Kun), denominator = npairs, labl = "{hat(%s)[%s]^{un}}(r)", desc = "uncorrected estimate of %s", preferred = "un", ratio=ratio) } if(any(correction == "periodic")) { ## Periodic (toroidal) correction ## Compute periodic distances closeP <- crosspairs(XI, XJ, max(r), what="ijd", periodic=TRUE, iX=imap, iY=jmap) ## evaluate estimate wh <- whist(closeP$d, breaks$val) # no weights Kper <- cumsum(wh)/(lambdaI * lambdaJ * areaI) K <- bind.ratfv(K, numerator = NULL, quotient = data.frame(per=Kper), denominator = npairs, labl = "{hat(%s)[%s]^{per}}(r)", desc = "periodic-corrected estimate of %s", preferred = "per", ratio=ratio) } if(any(correction == "border" | correction == "bord.modif")) { # border method # distance to boundary from each point of type I bI <- bdist.points(XI) # distance to boundary from first element of each (i, j) pair bcloseI <- bI[icloseI] # apply reduced sample algorithm RS <- Kount(dcloseIJ, bcloseI, bI, breaks) if(any(correction == "bord.modif")) { denom.area <- eroded.areas(W, r) Kbm <- RS$numerator/(denom.area * npairs) samplesizeKbm <- npairs * (denom.area/areaW) K <- bind.ratfv(K, numerator = NULL, quotient = data.frame(bord.modif=Kbm), denominator = npairs, labl = "{hat(%s)[%s]^{bordm}}(r)", desc = "modified border-corrected estimate of %s", preferred = "bord.modif", ratio=ratio) } if(any(correction == "border")) { Kb <- RS$numerator/(lambdaJ * RS$denom.count) K <- bind.ratfv(K, numerator = NULL, quotient = data.frame(border=Kb), denominator = RS$denom.count * nJ, labl = "{hat(%s)[%s]^{bord}}(r)", desc = "border-corrected estimate of %s", preferred = "border", ratio=ratio) } } if(any(correction == "translate")) { # translation correction edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE) wh <- whist(dcloseIJ, breaks$val, edgewt) Ktrans <- cumsum(wh)/(lambdaI * lambdaJ * areaI) rmax <- diameter(W)/2 Ktrans[r >= rmax] <- NA K <- bind.ratfv(K, numerator = NULL, quotient = data.frame(trans=Ktrans), denominator = npairs, labl = "{hat(%s)[%s]^{trans}}(r)", desc = "translation-corrected estimate of %s", preferred = "trans", ratio=ratio) } if(any(correction == "isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dcloseIJ, ncol=1)) wh <- whist(dcloseIJ, breaks$val, edgewt) Kiso <- cumsum(wh)/(lambdaI * lambdaJ * areaI) rmax <- diameter(W)/2 Kiso[r >= rmax] <- NA K <- bind.ratfv(K, numerator = NULL, quotient = data.frame(iso=Kiso), denominator = npairs, labl = "{hat(%s)[%s]^{iso}}(r)", desc = "Ripley isotropic corrected estimate of %s", preferred = "iso", ratio=ratio) } # default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) if(ratio) K <- conform.ratfv(K) return(K) } spatstat.explore/R/exactppm.R0000644000176200001440000001206714611073310015741 0ustar liggesusers#' exactppm.R #' #' An internal device to represent Poisson point process models #' for which the MLE is computable exactly. 
#' - uniform intensity #' - intensity proportional to baseline #' These models are used mainly as a mathematical device #' in nonparametric methods such as 'rhohat' to represent the null/reference model, #' so that the code for nonparametric methods does not depend on 'ppm' #' #' $Revision: 1.6 $ $Date: 2023/05/02 07:01:00 $ exactppm <- function(X, baseline=NULL, ..., subset=NULL, eps=NULL, dimyx=NULL, rule.eps=c("adjust.eps", "grow.frame", "shrink.frame")) { stopifnot(inherits(X, c("ppp", "quad"))) if(is.quad(X)) X <- X$data marx <- marks(X) # may be null lev <- levels(marx) # may be null rule.eps <- match.arg(rule.eps) if(is.null(subset)) { Xfit <- X } else { verifyclass(subset, "owin") Xfit <- X[subset] } if(is.null(baseline)) { #' stationary Poisson process beta <- intensity(Xfit) } else { #' Poisson process with intensity proportional to baseline if(is.im(baseline)) { denom <- integral(baseline, domain=subset) } else if(is.imlist(baseline) && is.multitype(X) && length(baseline) == length(lev)) { denom <- sapply(baseline, integral, domain=subset) } else if(is.function(baseline)) { if(!is.multitype(X) || length(formals(baseline)) == 2) { ba <- as.im(baseline, W=Window(X), eps=eps, dimyx=dimyx, rule.eps=rule.eps) denom <- integral(ba, domain=subset) } else { ba <- lapply(lev, function(z) { as.im(baseline, W=Window(X), z, eps=eps, dimyx=dimyx, rule.eps=rule.eps)}) denom <- sapply(ba, integral, domain=subset) } } else if(identical(baseline, "x")) { ba <- as.im(function(x,y){x}, W=Window(X), eps=eps, dimyx=dimyx, rule.eps=rule.eps) denom <- integral(ba, domain=subset) } else if(identical(baseline, "y")) { ba <- as.im(function(x,y){y}, W=Window(X), eps=eps, dimyx=dimyx, rule.eps=rule.eps) denom <- integral(ba, domain=subset) } else if(is.numeric(baseline) && (length(baseline) == 1 || is.multitype(X) && length(baseline) == length(lev))) { denom <- baseline * area(Window(Xfit)) } else stop("Format of 'baseline' is not understood") numer <- if(is.multitype(Xfit)) as.integer(table(marks(Xfit))) else npoints(Xfit) beta <- numer/denom if(length(beta) == length(lev)) names(beta) <- lev } model <- list(X=X, baseline=baseline, subset=subset, beta=beta) class(model) <- c("exactppm", class(model)) return(model) } print.exactppm <- function(x, ...) 
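## Illustrative sketch (hypothetical usage, not executed here):
## exactppm() is used internally, e.g. by rhohat(), to represent the
## null/reference model.  A call such as
##     fit <- exactppm(cells)          # homogeneous Poisson, intensity = n/|W|
##     lam <- predict(fit, dimyx=64)   # constant intensity image
## returns the exact maximum likelihood estimate of the uniform intensity.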
{ with(x, { splat("Exactly-fitted point process model") if(!is.multitype(X)) { if(is.null(baseline)) splat("Homogeneous intensity", signif(beta, 4)) else splat("Intensity proportional to baseline", paren(paste("proportionality constant", signif(beta, 4)))) } else { lab <- levels(marks(X)) if(is.null(baseline)) { splat("Homogeneous intensities:") splat(paste(paste(lab, signif(beta, 4), sep=": "), collapse=", ")) } else { splat("Intensities proportional to baseline") splat("Proportionality constants:") splat(paste(paste(lab, signif(beta, 4), sep=": "), collapse=", ")) } } }) return(invisible(NULL)) } is.poisson.exactppm <- function(x) { TRUE } is.stationary.exactppm <- function(x) { is.null(x$baseline) || is.numeric(x$baseline) } predict.exactppm <- function(object, locations=NULL, ..., eps=NULL, dimyx=NULL, rule.eps=c("adjust.eps", "grow.frame", "shrink.frame")) { X <- object$X beta <- object$beta # numeric baseline <- object$baseline # covariate or NULL if(is.null(locations)) locations <- Window(X) if(length(beta) > 1) { ## Intensities for different types ## Syntax of 'evaluateCovariate' requires a list in this case beta <- as.list(beta) } ## evaluate at desired locations rule.eps <- match.arg(rule.eps) Beta <- evaluateCovariate(beta, locations, eps=eps, dimyx=dimyx, rule.eps=rule.eps) if(is.null(baseline)) { Lambda <- Beta } else { Baseline <- evaluateCovariate(baseline, locations, eps=eps, dimyx=dimyx, rule.eps=rule.eps) if(is.im(Beta) || is.imlist(Beta) || is.im(Baseline) || is.imlist(Baseline)) { Lambda <- imagelistOp(Beta, Baseline, "*") } else { Lambda <- Beta * Baseline } } ## tidy if(is.imlist(Lambda)) { if(length(Lambda) == 1) { Lambda <- Lambda[[1]] } else if(length(Lambda) == length(Beta)) { names(Lambda) <- names(beta) } } return(Lambda) } spatstat.explore/R/laslett.R0000644000176200001440000002730414611073310015570 0ustar liggesusers#' Calculating Laslett's transform #' Original by Kassel Hingee #' Adapted by Adrian Baddeley #' Copyright (C) 2016 Kassel Hingee and Adrian Baddeley # $Revision: 1.11 $ $Date: 2024/02/04 08:04:51 $ laslett <- function(X, ..., verbose=FALSE, plotit=TRUE, discretise=FALSE, type = c("lower", "upper", "left", "right")){ #' validate X and convert to a logical matrix type <- match.arg(type) oldX <- X if(is.im(X)) { X <- solutionset(X != 0) } else if(!is.owin(X)) stop("X should be an image or a window", call.=FALSE) if(type != "lower") { nrot <- match(type, c("right", "upper", "left")) theta <- nrot * pi/2 X <- rotate(X, angle=-theta) } if(!discretise && (is.polygonal(X) || is.rectangle(X))) { result <- polyLaslett(X, ..., oldX=oldX, verbose=verbose, plotit=FALSE) } else { result <- maskLaslett(X, ..., oldX=oldX, verbose=verbose, plotit=FALSE) } if(type != "lower") { #' rotate back prods <- c("TanOld", "TanNew", "Rect") result[prods] <- lapply(result[prods], rotate, angle=theta) } if(plotit) plot(result, ...) result$type <- type return(result) } maskLaslett <- local({ sumtoright <- function(x) { rev(cumsum(rev(x))) - x } maskLaslett <- function(X, ..., eps=NULL, dimyx=NULL, xy=NULL, rule.eps=c("adjust.eps","grow.frame","shrink.frame"), oldX=X, verbose=FALSE, plotit=TRUE) { if(is.null(oldX)) oldX <- X rule.eps <- match.arg(rule.eps) X <- as.mask(X, eps=eps, dimyx=dimyx, xy=xy, rule.eps=rule.eps) unitX <- unitname(X) if(is.empty(X)) stop("Empty window!") M <- as.matrix(X) #' ....... Compute transformed set ................... 
#' Total width of transformed set on each row TotFalse <- rowSums(!M) ## compute transformed set Laz <- (col(M) <= TotFalse[row(M)]) Laz <- owin(mask=Laz, xrange=X$xrange, yrange=X$yrange, unitname=unitX) #' Largest sub-rectangle of transformed set width <- min(TotFalse) * X$xstep Rect <- owinInternalRect(X$xrange[1L] + c(0, width), X$yrange, unitname=unitX) #' Along each horizontal line (row), #' compute a running count of FALSE pixels. #' This is the mapping for the set transform #' (the value at any pixel gives the new column number #' for the transformed pixel) CumulFalse <- t(apply(!M, 1L, cumsum)) #' discard one column for consistency with other matrices below CumulFalse <- CumulFalse[,-1L,drop=FALSE] #' ....... Find lower tangent points ................. #' compute discrete gradient in x direction G <- t(apply(M, 1, diff)) #' detect entries, exits, changes Exit <- (G == -1) Enter <- (G == 1) Change <- Exit | Enter #' form a running total of the number of pixels inside X #' to the **right** of the current pixel FutureInside <- t(apply(M, 1, sumtoright))[,-1L,drop=FALSE] #' find locations of changes loc <- which(Change, arr.ind=TRUE) #' don't consider entries/exits in the bottom row ok <- (loc[,"row"] > 1) loc <- loc[ok, , drop=FALSE] #' corresponding locations on horizontal line below current line below <- cbind(loc[,"row"]-1L, loc[,"col"]) #' look up data at these locations df <- data.frame(row=loc[,"row"], col=loc[,"col"], newcol=CumulFalse[loc], Exit=Exit[loc], Enter=Enter[loc], InsideBelow=M[below], FutureInsideBelow=FutureInside[below]) #' identify candidates for tangents df$IsCandidate <- with(df, Enter & !InsideBelow & (newcol < TotFalse[row])) #' collect data for each horizontal line (row) #' then sort by increasing x (column) within each line. oo <- with(df, order(row, col)) df <- df[oo, , drop=FALSE] #' divide data into one piece for each hztal line g <- split(df, df$row) #' Initialise empty list of tangent points tangents <- data.frame(row=integer(0), col=integer(0), newcol=integer(0)) #' process each hztal line for(p in g) { tangents <- with(p, { candidates <- which(IsCandidate) # indices are row numbers in 'p' if(verbose) print(p) exits <- which(Exit) for(i in candidates) { if(verbose) cat(paste("candidate", i, "\n")) if(any(found <- (exits > i))) { j <- exits[min(which(found))] if(verbose) cat(paste("next exit:", j, "\n")) #' check no pixels inside X in row below between i and j if(FutureInsideBelow[i] == FutureInsideBelow[j]) { if(verbose) cat(paste("Tangent (1) at row=", row[i], "col=", col[i], "\n")) tangents <- rbind(tangents, data.frame(row=row[i], col=col[i], newcol=newcol[i])) } } else { #' no exits on this row if(verbose) cat("no subsequent exit\n") if(FutureInsideBelow[i] == 0) { if(verbose) cat(paste("Tangent (2) at row=", row[i], "col=", col[i], "\n")) tangents <- rbind(tangents, data.frame(row=row[i], col=col[i], newcol=newcol[i])) } } } if(verbose) cat("====\n") tangents }) } tangents$oldx <- X$xcol[tangents$col] tangents$newx <- X$xcol[tangents$newcol] tangents$y <- X$yrow[tangents$row] TanOld <- with(tangents, ppp(oldx, y, window=Frame(X), unitname=unitX)) TanNew <- with(tangents, ppp(newx, y, window=Laz), unitname=unitX) result <- list(oldX=oldX, TanOld=TanOld, TanNew=TanNew, Rect=Rect, df=tangents) class(result) <- c("laslett", class(result)) if(plotit) plot(result, ...) return(result) } maskLaslett }) print.laslett <- function(x, ...) 
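## Illustrative sketch (hypothetical usage): for a set given as an image
## or window, e.g.
##     L <- laslett(heather$coarse, plotit=FALSE)
## the result contains the original and transformed tangent points
## (L$TanOld, L$TanNew) and the largest rectangle contained in the
## transformed set (L$Rect); depending on whether the input is polygonal
## or a pixel mask, polyLaslett() or maskLaslett() above does the work.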
{ cat("Laslett Transform\n") cat("\nOriginal object:\n") print(x$oldX) cat("\nTransformed set:\n") W <- Window(x$TanNew) print(W) unitinfo <- summary(unitname(W)) cat("\nTransformed area:", area.owin(W), "square", unitinfo$plural, unitinfo$explain, fill=TRUE) cat("\n") type <- x$type %orifnull% "lower" cat(npoints(x$TanNew), type, "tangent points found.", fill=TRUE) return(invisible(NULL)) } plot.laslett <- function(x, ..., Xpars=list(box=TRUE, col="grey"), pointpars=list(pch=3, cols="blue"), rectpars=list(lty=3, border="green")) { Display <- with(x, solist(Original= layered(oldX, TanOld, plotargs=list(Xpars, pointpars)), Transformed= layered(TanNew, Rect, plotargs=list(pointpars, rectpars)))) #' ignore arguments intended for as.mask argh <- list(...) if(any(bad <- names(argh) %in% c("eps", "dimyx", "xy", "rule.eps"))) argh <- argh[!bad] dont.complain.about(Display) do.call(plot, resolve.defaults(list(x=quote(Display)), argh, list(main="", mar.panel=0, hsep=1, equal.scales=TRUE))) return(invisible(NULL)) } polyLaslett <- function(X, ..., oldX=X, verbose=FALSE, plotit=TRUE) { X <- as.polygonal(X) if(is.empty(X)) stop("Empty window!") unitX <- unitname(X) # expand frame slightly B <- Frame(X) B <- grow.rectangle(B, max(sidelengths(B))/8) x0 <- B$xrange[1L] x1 <- B$xrange[2L] # extract vertices v <- vertices(X) nv <- length(v$x) # .......... compute transformed set ..................... # make horizontal segments from each vertex to sides of box left <- with(v, psp(rep(x0,nv), y, x, y, window=B, marks=1:nv, check=FALSE)) right <- with(v, psp(x, y, rep(x1,nv), y, window=B, marks=1:nv, check=FALSE)) # intersect each horizontal segment with the window if(verbose) cat("Processing", nv, "polygon vertices... ") clipleft <- clip.psp(left, X) clipright <- clip.psp(right, X) if(verbose) cat("Done.\n") # calculate lengths of clipped segments, and group by vertex. # marks indicate which hztal segment was the parent of each piece. lenleft <- tapply(lengths_psp(clipleft), factor(marks(clipleft), levels=1:nv), sum) lenright <- tapply(lengths_psp(clipright), factor(marks(clipright), levels=1:nv), sum) lenleft[is.na(lenleft)] <- 0 lenright[is.na(lenright)] <- 0 emptylenleft <- lengths_psp(left) - lenleft emptylenright <- lengths_psp(right) - lenright # The transformed polygon isrightmost <- (lenright == 0) yright <- v$y[isrightmost] xright <- x0 + (emptylenleft+emptylenright)[isrightmost] minxright <- min(xright) # right margin of largest rectangle ord <- order(yright) Ty <- yright[ord] Tx <- xright[ord] nT <- length(Ty) if(Tx[nT] > x0) { Ty <- c(Ty, Ty[nT]) Tx <- c(Tx, x0) } if(Tx[1L] > x0) { Ty <- c(Ty[1L], Ty) Tx <- c(x0, Tx) } TX <- owin(B$xrange, B$yrange, poly=list(x=Tx, y=Ty), check=FALSE) TX <- TX[Frame(X)] # .......... identify lower tangents ..................... 
V <- as.ppp(v, W=Frame(X), unitname=unitX) is.candidate <- is.tangent <- logical(nv) # apply simple criteria for ruling in or out Plist <- X$bdry cumnv <- 0 for(i in seq_along(Plist)) { P <- Plist[[i]] xx <- P$x yy <- P$y nn <- length(xx) # xnext <- c(xx[-1L], xx[1L]) ynext <- c(yy[-1L], yy[1L]) # xprev <- c(xx[nn], xx[-nn]) yprev <- c(yy[nn], yy[-nn]) is.candidate[cumnv + seq_len(nn)] <- if(!is.hole.xypolygon(P)) { (yprev > yy & ynext >= yy) } else { (yprev >= yy & ynext > yy) } cumnv <- cumnv + nn } ## was.candidate <- is.candidate #' reject candidates lying too close to boundary tooclose <- (bdist.points(V[is.candidate]) < diameter(Frame(V))/1000) is.candidate[is.candidate][tooclose] <- FALSE #' evaluate candidate points #' make tiny boxes around vertex candidates <- which(is.candidate) nc <- length(candidates) nnd <- nndist(V) if(verbose) { cat(paste("Processing", nc, "tangent candidates ... ")) pstate <- list() } tiny <- .Machine$double.eps for(j in 1:nc) { i <- candidates[j] eps <- nnd[i]/16 xi <- v$x[i] yi <- v$y[i] Below <- owinInternalRect(xi + c(-eps,eps), yi + c(-eps, 0)) # Above <- owinInternalRect(xi + c(-eps, eps), yi + c(0, eps)) UpLeft <- owinInternalRect(xi + c(-eps, 0), yi + c(0, eps)) is.tangent[i] <- (overlap.owin(X, Below) <= tiny) && (overlap.owin(X, UpLeft) < eps^2) if(verbose) pstate <- progressreport(j, nc, state=pstate) } if(verbose) cat(paste("Found", sum(is.tangent), "tangents\n")) TanOld <- V[is.tangent] ynew <- TanOld$y xnew <- x0 + emptylenleft[is.tangent] TanNew <- ppp(xnew, ynew, window=TX, check=FALSE, unitname=unitX) # maximal rectangle Rect <- owinInternalRect(c(X$xrange[1L], minxright), X$yrange, unitname=unitX) # df <- data.frame(xold=TanOld$x, xnew=TanNew$x, y=TanNew$y) # result <- list(oldX=oldX, TanOld=TanOld, TanNew=TanNew, Rect=Rect, df=df) class(result) <- c("laslett", class(result)) if(plotit) plot(result, ...) 
return(result) } spatstat.explore/R/bw.optim.R0000644000176200001440000001510114611073307015655 0ustar liggesusers# # bw.optim.R # # Class of optimised bandwidths # Plotting the object displays the optimisation criterion # # $Revision: 1.37 $ $Date: 2024/01/29 07:09:03 $ # bw.optim <- function(cv, h, iopt=if(optimum == "min") which.min(cv) else which.max(cv), ..., cvname, hname, criterion="cross-validation", optimum = c("min", "max"), warnextreme=TRUE, hargnames=NULL, yexp=NULL, unitname=NULL, template=NULL, exponent=1, hword) { if(missing(cvname) || is.null(cvname)) cvname <- short.deparse(substitute(cv)) if(missing(hname) || is.null(hname)) hname <- short.deparse(substitute(h)) stopifnot(is.numeric(cv)) stopifnot(is.numeric(h)) stopifnot(length(h) == length(cv)) optimum <- match.arg(optimum) result <- h[iopt] if(warnextreme) { optimised <- switch(optimum, min="minimised", max="maximised") if(is.infinite(result)) { warning(paste(criterion, "criterion was", optimised, "at", hname, "=", as.numeric(result)), call.=FALSE) } else if((iopt == length(h) || iopt == 1)) { warning(paste(criterion, "criterion was", optimised, "at", if(iopt == 1) "left-hand" else "right-hand", "end of interval", paste0(prange(signif(range(h[is.finite(h)]), 3)), ";"), "use", ngettext(length(hargnames), "argument", "arguments"), paste(sQuote(hargnames), collapse=", "), "to specify a wider interval for bandwidth", sQuote(hname)), call.=FALSE) } } if(missing(hword)) hword <- if(is.null(template)) "bandwidth" else "scale factor" attr(result, "cv") <- cv attr(result, "h") <- h attr(result, "iopt") <- iopt attr(result, "labels") <- list(hname=hname, cvname=cvname) attr(result, "info") <- list(...) attr(result, "criterion") <- criterion attr(result, "optimum") <- optimum attr(result, "hargnames") <- hargnames attr(result, "units") <- as.unitname(unitname) attr(result, "yexp") <- yexp attr(result, "template") <- template attr(result, "exponent") <- exponent %orifnull% 1 attr(result, "hword") <- hword class(result) <- "bw.optim" return(result) } print.bw.optim <- function(x, ...) { y <- as.numeric(x) names(y) <- hname <- attr(x, "labels")$hname print(y, ...) if(!is.null(m <- attr(x, "template"))) { exponent <- attr(x, "exponent") %orifnull% 1 hpow <- if(exponent == 1) hname else paste0(paren(hname), "^", exponent) cat("\n") splat(hpow, "is interpreted as a multiple of:") print(m) } return(invisible(NULL)) } summary.bw.optim <- function(object, ...) 
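## Illustrative sketch (hypothetical usage): bandwidth selectors such as
## bw.diggle() or bw.ppl() return objects of class "bw.optim", e.g.
##     b <- bw.diggle(cells)
##     b          # prints the selected bandwidth via print.bw.optim()
##     plot(b)    # plots the cross-validation criterion against bandwidth
## so the result behaves as a single numeric value with the selection
## criterion attached as attributes.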
{ z <- attributes(object) z$hopt <- hopt <- as.numeric(object) z$is.extreme <- is.infinite(hopt) || with(z, iopt == 1 || iopt == length(h)) structure(z, class="summary.bw.optim") } print.summary.bw.optim <- function(x, ..., digits=3) { hword <- x$hword %orifnull% "bandwidth" Hword <- paste0(toupper(substring(hword, 1, 1)), substring(hword, 2)) splat(Hword, "value selected by", x$criterion) su <- summary(x$units) splat("Optimal value:", x$labels$hname, "=", signif(x$hopt, digits=digits), if(x$hopt == 1) su$singular else su$plural, su$explain) splat("Search performed over", length(x$h), "candidate values of", hword, "in the interval", prange(signif(range(x$h), digits=digits))) optname <- if(is.null(x$optimum)) "Optimum" else switch(x$optimum, min="Minimum", max="Maximum", x$optimum) splat(optname, "value of criterion", paste0(x$labels$cvname, ":"), signif(x$cv[x$iopt], digits=digits)) if(isTRUE(x$is.extreme)) { splat(optname, "achieved at", if(is.infinite(x$hopt)) "infinity" else if(x$iopt == 1) "lower limit of range" else "upper limit of range") } if(!is.null(creator <- x$info$creator)) splat("Computed by the function", sQuote(creator)) if(!is.null(tem <- x$template)) { exponent <- x$exponent Hpow <- if(exponent == 1) Hword else paste0(paren(Hword), "^", exponent) splat(Hpow, "is interpreted as a multiplier of:") print(tem) } return(invisible(NULL)) } as.data.frame.bw.optim <- function(x, ...) { h <- attr(x, "h") cv <- attr(x, "cv") df <- data.frame(h, cv) labels <- attr(x, "labels") colnames(df) <- labels[c("hname", "cvname")] info <- attr(x, "info") if(length(info) > 0) { lenfs <- lengths(info) if(any(ok <- (lenfs == nrow(df)))) { df <- cbind(df, as.data.frame(info[ok])) } } return(df) } as.fv.bw.optim <- function(x) { # convert to fv object df <- as.data.frame(x) dfnames <- colnames(df) hname <- dfnames[1L] cvname <- dfnames[2L] descrip <- c("smoothing parameter", paste(attr(x, "criterion"), "criterion")) if(ncol(df) > 2) descrip <- c(descrip, paste("Additional variable", sQuote(dfnames[-(1:2)]))) labl <- c(hname, paste0(dfnames[-1L], paren(hname))) yexp <- attr(x, "yexp") %orifnull% substitute(CV(h), list(CV=as.name(cvname), h=as.name(hname))) xfv <- fv(df, argu=hname, ylab=yexp, valu=cvname, labl=labl, desc=descrip, fname=cvname, yexp=yexp) fvnames(xfv, ".") <- cvname unitname(xfv) <- unitname(x) return(xfv) } plot.bw.optim <- function(x, ..., showopt=TRUE, optargs=list(lty=3, col="blue")) { xname <- short.deparse(substitute(x)) # convert to fv object xfv <- as.fv(x) # plot cross-validation criterion out <- do.call(plot.fv, resolve.defaults(list(x=quote(xfv)), list(...), list(main=xname))) # Turn off 'showopt' if the x-variable is not the bandwidth if(missing(showopt)) { argh <- list(...) isfmla <- unlist(lapply(argh, inherits, what="formula")) if(any(isfmla)) { fmla <- argh[[min(which(isfmla))]] xvar <- deparse(rhs.of.formula(fmla, tilde=FALSE)) if(!(identical(xvar, fvnames(xfv, ".x")) || identical(xvar, ".x"))) showopt <- FALSE } } # show optimal value? 
if(showopt) { hoptim <- as.numeric(x) if(spatstat.options('monochrome')) optargs <- col.args.to.grey(optargs) do.call(abline, append(list(v=hoptim), optargs)) } if(is.null(out)) return(invisible(NULL)) return(out) } spatstat.explore/R/FGmultiInhom.R0000644000176200001440000002352414611073307016470 0ustar liggesusers#' #' FGmultiInhom.R #' #' inhomogeneous multitype G and F functions #' #' Original code by Ottmar Cronie and Marie-Colette van Lieshout #' #' Rewritten for spatstat by Adrian Baddeley #' #' GmultiInhom #' FmultiInhom #' #' Gcross.inhom #' Gdot.inhom #' #' Copyright (c) 2016-2023 O. Cronie, M.N.M. van Lieshout, A.J. Baddeley #' GNU Public Licence GPL >= 2.0 #' #' $Revision: 1.14 $ $Date: 2023/04/09 10:08:30 $ Gmulti.inhom <- GmultiInhom <- function(X, I, J, lambda=NULL, lambdaI=NULL, lambdaJ=NULL, lambdamin=NULL, ..., r=NULL, ReferenceMeasureMarkSetI=NULL, ratio=FALSE){ if(!is.ppp(X) || !is.marked(X)) stop("X should be a marked point pattern") W <- Window(X) nX <- npoints(X) #' handle r argument rmax <- rmax.rule("G", W, intensity(X)) bks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmax) r <- bks$r rmax <- bks$max nr <- length(r) #' Accept any kind of index for I; convert it to a logical index I <- ppsubset(X, I, "I") if(is.null(I)) stop("I must be a valid subset index") XI <- X[I] nI <- sum(I) if (nI == 0) stop("No points satisfy condition I") if(!is.null(ReferenceMeasureMarkSetI)) { check.1.real(ReferenceMeasureMarkSetI) stopifnot(ReferenceMeasureMarkSetI >= 0) } #' likewise for J if(missing(J) || is.null(J)) { J <- rep(TRUE, nX) } else { J <- ppsubset(X, J, "J") if(is.null(J)) stop("J must be a valid subset index") } XJ <- X[J] nJ <- sum(J) if (nJ == 0) stop("No points satisfy condition J") #' supply either lambda, or lambdaI and lambdaJ lam.given <- !is.null(lambda) lamIJ.given <- !is.null(lambdaI) || !is.null(lambdaJ) if(lam.given == lamIJ.given || is.null(lambdaI) != is.null(lambdaJ)) stop(paste("Supply either a vector lambda of length equal to npoints(X),", "or two vectors lambdaI, lambdaJ of lengths", "equal to npoints(X[I]) and npoints(X[J]) respectively"), call.=FALSE) if(lamIJ.given) { #' lambdaI and lambdaJ given check.nvector(lambdaI, nI, things="points of X[I]", vname="lambdaI") stopifnot(all(lambdaI > 0)) check.nvector(lambdaJ, nJ, things="points of X[J]", vname="lambdaJ") stopifnot(all(lambdaJ > 0)) if(is.null(lambdamin)){ stop(paste("Supply lambdamin - a single positive number which is", "smaller than the values in lambdaJ"), call.=FALSE) } check.1.real(lambdamin) stopifnot(lambdamin > 0) stopifnot(lambdamin <= min(lambdaJ)) } else { #' lambda given check.nvector(lambda, nX, things="points of X", vname="lambda") stopifnot(all(lambda > 0)) lambdaI <- lambda[I] lambdaJ <- lambda[J] if(is.null(lambdamin)){ stop(paste("Supply lambdamin - a single positive number which is", "smaller than the values in lambda"), call.=FALSE) } check.1.real(lambdamin) stopifnot(lambdamin > 0) stopifnot(lambdamin <= min(lambda)) } #' Calculate 1/lambda(x_i,y_i,m_i)) #' for all (x_i,y_i,m_i) with m_i in I invlambdaI <- 1/lambdaI #' Calculate (1 - lambda_min/lambda(x_i,y_i,m_i)) #' for all (x_i,y_i,m_i) with m_i in J Coeff <- 1-(lambdamin/lambdaJ) ## CoeffMatrix <- matrix(rep(Coeff,times=nI), nrow=nI, byrow=TRUE) #' distances ## DistanceXItoXJ <- crossdist(XI,XJ) #' eroded areas and boundary distances areaWr <- eroded.areas(W, r) bdistXI <- bdist.points(XI) #' for each point x in XI, determine largest r such that x \in W-r ibI <- fastFindInterval(bdistXI, r, labels=TRUE) #' count of points 
inside W-r for each r ## NumberEroded <- revcumsum(table(ibI)) #' denominator #' sum invlambdaI for all points x \in W-r DenominatorN <- c(sum(invlambdaI), revcumsum(natozero(tapply(invlambdaI, ibI, sum)))) if(!is.null(ReferenceMeasureMarkSetI)) DenominatorA <- areaWr * ReferenceMeasureMarkSetI #' local products of weights #' sort data points in order of increasing x coordinate xxI <- XI$x yyI <- XI$y oXI <- fave.order(xxI) xIord <- xxI[oXI] yIord <- yyI[oXI] #' xxJ <- XJ$x yyJ <- XJ$y vvJ <- Coeff oXJ <- fave.order(xxJ) xJord <- xxJ[oXJ] yJord <- yyJ[oXJ] vJord <- vvJ[oXJ] # compute local cumulative products z <- .C(SE_locxprod, ntest = as.integer(nI), xtest = as.double(xIord), ytest = as.double(yIord), ndata = as.integer(nJ), xdata = as.double(xJord), ydata = as.double(yJord), vdata = as.double(vJord), nr = as.integer(nr), rmax = as.double(rmax), ans = as.double(numeric(nI * nr)), PACKAGE="spatstat.explore") ans <- matrix(z$ans, nrow=nr, ncol=nI) #' revert to original ordering loccumprod <- matrix(, nrow=nr, ncol=nI) loccumprod[, oXI] <- ans #' border correction outside <- outer(r, bdistXI, ">") loccumprod[outside] <- 0 #' weight by 1/lambdaI wlcp <- loccumprod * matrix(invlambdaI, byrow=TRUE, nr, nI) #' sum over I for each fixed r numer <- .rowSums(wlcp, nr, nI) # pack up Gdf <- data.frame(r=r, theo = 1 - exp(- lambdamin * pi * r^2)) desc <- c("distance argument r", "theoretical Poisson %s") theo.denom <- rep.int(nI, nr) fname <- c("G", "list(inhom,I,J)") G <- ratfv(Gdf, NULL, theo.denom, "r", quote(G[inhom, I, J](r)), "theo", NULL, c(0,rmax), c("r", makefvlabel(NULL, NULL, fname, "pois")), desc, fname=fname, yexp=quote(G[list(inhom,I,J)](r)), ratio=ratio) # add border corrected (Hamilton Principle) estimate G <- bind.ratfv(G, data.frame(bord=DenominatorN-numer), DenominatorN, makefvlabel(NULL, "hat", fname, "bord"), "border estimate of %s", "bord", ratio=ratio) fvnames(G, ".") <- c("bord", "theo") # add modified border corrected (non-Hamilton-Principle) estimate if(!is.null(ReferenceMeasureMarkSetI)) { G <- bind.ratfv(G, data.frame(bordm=DenominatorA-numer), DenominatorA, makefvlabel(NULL, "hat", fname, "bordm"), "modified border estimate of %s", "bordm", ratio=ratio) fvnames(G, ".") <- c("bord", "bordm", "theo") } # formula(G) <- . ~ r unitname(G) <- unitname(X) if(ratio) G <- conform.ratfv(G) return(G) } #' marked inhomogeneous F Fmulti.inhom <- FmultiInhom <- function(X, J, lambda=NULL,lambdaJ=NULL, lambdamin=NULL, ..., r=NULL) { if(!is.ppp(X) || !is.marked(X)) stop("X should be a marked point pattern") nX <- npoints(X) #' Accept any kind of index for J; convert it to a logical index J <- ppsubset(X, J, "J") if(is.null(J)) stop("J must be a valid subset index") XJ <- X[J] nJ <- sum(J) if (nJ == 0) stop("No points satisfy condition J") if(is.null(lambda) == is.null(lambdaJ)) stop(paste("Supply either a vector lambda of length equal to npoints(X),", "or a vector lambdaJ of length equal to npoints(X[J])"), call.=FALSE) if(is.null(lambdamin)) stop("Supply a value for lambdamin", call.=FALSE) check.1.real(lambdamin) if(!is.null(lambda)) { check.nvector(lambda, nX, vname="lambda") stopifnot(all(lambda > 0)) stopifnot(lambdamin <= min(lambda[J])) lambdaJ <- lambda[J] } else { check.nvector(lambdaJ, nJ, vname="lambdaJ") stopifnot(all(lambdaJ > 0)) stopifnot(lambdamin <= min(lambdaJ)) } FJ <- Finhom(XJ, lambda=lambdaJ, lmin=lambdamin, r=r, ...) 
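  ## The multitype inhomogeneous F function is the inhomogeneous
  ## empty-space function of the sub-pattern X[J], so Finhom() does the
  ## actual computation; the following lines only relabel the result.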
conserve <- attr(FJ, "conserve") FJ <- rebadge.fv(FJ, new.ylab = quote(F[inhom, J](r)), new.fname = c("F", "list(inhom,J)"), new.yexp = quote(F[list(inhom,J)](r))) attr(FJ, "conserve") <- conserve return(FJ) } ## derived functions Gcross.inhom, Gdot.inhom Gcross.inhom <- function(X, i, j, lambda=NULL, lambdaI=NULL, lambdaJ=NULL, lambdamin=NULL, ..., r=NULL, ReferenceMeasureMarkSetI=NULL, ratio=FALSE) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] I <- (marx == i) J <- (marx == j) if(!any(I)) stop(paste("There are no points of type", sQuote(i))) if(!any(J)) stop(paste("There are no points of type", sQuote(j))) G <- Gmulti.inhom(X, I, J, lambda, lambdaI, lambdaJ, lambdamin, ..., r=r, ReferenceMeasureMarkSetI=ReferenceMeasureMarkSetI, ratio=ratio) G <- rebadge.as.crossfun(G, "G", "inhom", i, j) return(G) } Gdot.inhom <- function(X, i, lambdaI=NULL, lambdadot=NULL, lambdamin=NULL, ..., r=NULL, ReferenceMeasureMarkSetI=NULL, ratio=FALSE) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) if(!any(I)) stop(paste("There are no points of type", sQuote(i))) J <- rep.int(TRUE, npoints(X)) G <- Gmulti.inhom(X, I, J, lambdaI=lambdaI, lambdaJ=lambdadot, lambdamin=lambdamin, ..., r=r, ReferenceMeasureMarkSetI=ReferenceMeasureMarkSetI, ratio=ratio) G <- rebadge.as.dotfun(G, "G", "inhom", i) return(G) } spatstat.explore/R/nnorient.R0000644000176200001440000001024014611073310015743 0ustar liggesusers## ## nnorient.R ## ## nearest neighbour pair orientation distribution ## ## Function \vartheta(phi) defined in ## Illian et al (2008) equ (4.5.3) page 253 ## ## $Revision: 1.4 $ $Date: 2018/10/02 01:21:40 $ nnorient <- function(X, ..., cumulative=FALSE, correction, k = 1, unit=c("degree", "radian"), domain=NULL, ratio=FALSE) { stopifnot(is.ppp(X)) check.1.integer(k) stopifnot(k>=1) W <- Window(X) if(!is.null(domain)) stopifnot(is.subset.owin(domain, W)) unit <- match.arg(unit) switch(unit, degree = { FullCircle <- 360 Convert <- 180/pi }, radian = { FullCircle <- 2 * pi Convert <- 1 }) ## choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(!correction.given) correction <- c("bord.modif", "none") correction <- pickoption("correction", correction, c(none="none", bord.modif="bord.modif", good="good", best="best"), multi=TRUE) correction[correction %in% c("good", "best")] <- "bord.modif" ## process point pattern Xcoord <- coords(X) Ycoord <- Xcoord[nnwhich(X, k=k), ] if(!is.null(domain)) { inD <- inside.owin(Xcoord$x, Xcoord$y, domain) Xcoord <- Xcoord[inD,] Ycoord <- Ycoord[inD,] } dYX <- Ycoord-Xcoord ANGLE <- with(dYX, atan2(y, x) * Convert) %% FullCircle nangles <- length(ANGLE) ## initialise output object Nphi <- 512 breaks <- make.even.breaks(bmax=FullCircle, npos=Nphi-1) phi <- breaks$r Odf <- data.frame(phi = phi, theo = (if(cumulative) phi else 1)/FullCircle) desc <- c("angle argument phi", "theoretical isotropic %s") NOletter <- if(cumulative) "Theta" else "vartheta" NOsymbol <- as.name(NOletter) NNO <- ratfv(Odf, NULL, denom=nangles, argu="phi", ylab=substitute(fn(phi), list(fn=NOsymbol)), valu="theo", fmla = . 
~ phi, alim = c(0, FullCircle), c("phi", "{%s[%s]^{pois}}(phi)"), desc, fname=NOletter, yexp=substitute(fn(phi), list(fn=NOsymbol))) ## ^^^^^^^^^^^^^^^ Compute edge corrected estimates ^^^^^^^^^^^^^^^^ if(any(correction == "none")) { ## uncorrected! For demonstration purposes only! if(cumulative) { wh <- whist(ANGLE, breaks$val) # no weights num.un <- cumsum(wh) } else { kd <- circdensity(ANGLE, ..., n=Nphi, unit=unit) num.un <- kd$y * nangles } den.un <- nangles ## uncorrected estimate NNO <- bind.ratfv(NNO, data.frame(un=num.un), den.un, "{hat(%s)[%s]^{un}}(phi)", "uncorrected estimate of %s", "un", ratio=ratio) } if("bord.modif" %in% correction) { ## border type correction bX <- bdist.points(X) nndX <- nndist(X, k=k) if(!is.null(domain)) { bX <- bX[inD] nndX <- nndX[inD] } ok <- (nndX < bX) nok <- sum(ok) rr <- seq(0, max(bX), length=256) if(nok == 0) { num.bm <- numeric(Nphi) # i.e. rep(0, Nphi) } else { Ar <- eroded.areas(W, rr) Arf <- approxfun(rr, Ar, rule=2) AI <- Arf(bX) edgewt <- ifelse(ok, pmin(area(W)/AI, 100), 0) if(cumulative) { wh <- whist(ANGLE, breaks$val, edgewt) num.bm <- cumsum(wh)/mean(edgewt) } else { w <- edgewt/sum(edgewt) kd <- circdensity(ANGLE, ..., weights=w, n=Nphi, unit=unit) num.bm <- kd$y * nok } } den.bm <- nok NNO <- bind.ratfv(NNO, data.frame(bordm=num.bm), den.bm, "{hat(%s)[%s]^{bordm}}(phi)", "modified border-corrected estimate of %s", "bordm", ratio=ratio) } unitname(NNO) <- switch(unit, degree = c("degree", "degrees"), radian = c("radian", "radians")) return(NNO) } spatstat.explore/R/Jmulti.inhom.R0000644000176200001440000001004614611073307016476 0ustar liggesusers#' #' Jmulti.inhom.R #' #' Inhomogeneous multitype J function #' #' original code by Jonatan Gonzalez #' Edited for spatstat by Adrian Baddeley #' #' JmultiInhom #' Jdot.inhom #' Jcross.inhom #' #' $Revision: 1.7 $ $Date: 2023/04/10 03:11:50 $ Jmulti.inhom <- function(X, I, J, lambda=NULL, lambdaI=NULL, lambdaJ=NULL, lambdamin=NULL, ..., r=NULL, ReferenceMeasureMarkSetI=NULL, ratio=FALSE){ ## compute multitype inhomogeneous G ## (including determination of r and validation of lmin) GIJ <- GmultiInhom(X, I, J, lambda, lambdaI, lambdaJ, lambdamin, ..., r=r, ReferenceMeasureMarkSetI=ReferenceMeasureMarkSetI, ratio=ratio) ## compute multitype inhomogeneous F FJ <- FmultiInhom(X, J, lambda, lambdaJ, lambdamin, ..., r = GIJ$r) ## evaluate inhomogeneous J function if(!ratio) { JIJ <- eval.fv((1 - GIJ) / (1 - FJ)) } else { num <- eval.fv(1 - GIJ) den <- eval.fv(1 - FJ) JIJ <- eval.fv(num / den) JIJ <- rat(JIJ, num, den) } ## relabel the fv object JIJ <- rebadge.fv(JIJ, new.ylab = quote(J[inhom, I, J](r)), new.fname = c("J", "list(inhom,I,J)"), tags = names(JIJ), new.labl = attr(GIJ, "labl"), new.yexp = quote(J[list(inhom, I, J)](r))) ## tack on extra info attr(JIJ, "G") <- GIJ attr(JIJ, "F") <- FJ attr(JIJ, "dangerous") <- attr(GIJ, "dangerous") attr(JIJ, "conserve") <- append(attr(GIJ, "conserve"), attr(FJ, "conserve")) return(JIJ) } Jdot.inhom <- function(X, i, lambdaI=NULL, lambdadot=NULL, lambdamin=NULL, ..., r=NULL, ReferenceMeasureMarkSetI = NULL, ratio = FALSE){ verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X, dfok = FALSE) if(missing(i) || is.null(i)) i <- levels(marx)[1] I <- (marx == i) if(sum(I) == 0) stop(paste("No points have mark = ", i)) J <- rep.int(TRUE, npoints(X)) result <- Jmulti.inhom(X, I, J, lambdaI=lambdaI, lambdaJ=lambdadot, lambdamin=lambdamin, ..., r = r, ReferenceMeasureMarkSetI = ReferenceMeasureMarkSetI, 
ratio = ratio) conserve <- attr(result, "conserve") result <- rebadge.as.dotfun(result, "J", "inhom", i) attr(result, "conserve") <- conserve return(result) } Jcross.inhom <- function(X, i, j, lambda = NULL, lambdaI = NULL, lambdaJ = NULL, lambdamin = NULL, ..., r = NULL, ReferenceMeasureMarkSetI = NULL, ratio = FALSE) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X, dfok=FALSE) if(missing(i) || is.null(i)) i <- levels(marx)[1] if(missing(j) || is.null(j)) j <- levels(marx)[2] I <- (marx == i) J <- (marx == j) if(sum(I) == 0) stop(paste("No points have mark = ", i)) if(sum(J) == 0) stop(paste("No points have mark = ", j)) result <- Jmulti.inhom(X, I, J, lambda, lambdaI, lambdaJ, lambdamin, ..., r=r, ReferenceMeasureMarkSetI=ReferenceMeasureMarkSetI, ratio=ratio) conserve <- attr(result, "conserve") result <- rebadge.as.crossfun(result, "J", "inhom", i, j) attr(result, "conserve") <- conserve return(result) } spatstat.explore/R/density.psp.R0000644000176200001440000001202514611073310016372 0ustar liggesusers# # # density.psp.R # # $Revision: 1.21 $ $Date: 2022/05/21 08:55:03 $ # # density.psp <- function(x, sigma, ..., weights=NULL, edge=TRUE, method=c("FFT", "C", "interpreted"), at=NULL) { verifyclass(x, "psp") method <- match.arg(method) w <- x$window n <- x$n if(length(weights)) { check.nvector(weights, n, things="segments", oneok=TRUE, vname="weights") if(length(weights) == 1) weights <- rep(weights, n) } else weights <- NULL len <- lengths_psp(x) ang <- angles.psp(x, directed=TRUE) ux <- unitname(x) if(missing(sigma)) sigma <- 0.1 * diameter(w) #' determine locations for evaluation of density if(is.null(at)) { atype <- "window" w <- do.call.matched(as.mask, resolve.defaults(list(w=quote(w), ...))) } else if(is.owin(at)) { atype <- "window" w <- do.call.matched(as.mask, resolve.defaults(list(w=quote(at), ...))) } else { atype <- "points" atY <- try(as.ppp(at, W=w)) if(inherits(atY, "try-error")) stop("Argument 'at' should be a window or a point pattern", call.=FALSE) } #' detect empty pattern if(n == 0 || all(len == 0)) switch(atype, window = return(as.im(0, w)), points = return(rep(0, npoints(atY)))) #' determine prediction coordinates switch(atype, window = { xy <- rasterxy.mask(w) xx <- xy$x yy <- xy$y }, points = { xx <- atY$x yy <- atY$y }) #' c o m p u t e switch(method, interpreted = { #' compute matrix contribution from each segment coz <- cos(ang) zin <- sin(ang) if(is.null(weights)) { #' unweighted for(i in seq_len(n)) { en <- x$ends[i,] dx <- xx - en$x0 dy <- yy - en$y0 u1 <- dx * coz[i] + dy * zin[i] u2 <- - dx * zin[i] + dy * coz[i] value <- dnorm(u2, sd=sigma) * (pnorm(u1, sd=sigma) - pnorm(u1-len[i], sd=sigma)) totvalue <- if(i == 1L) value else (value + totvalue) } } else { #' weighted for(i in seq_len(n)) { en <- x$ends[i,] dx <- xx - en$x0 dy <- yy - en$y0 u1 <- dx * coz[i] + dy * zin[i] u2 <- - dx * zin[i] + dy * coz[i] value <- weights[i] * dnorm(u2, sd=sigma) * (pnorm(u1, sd=sigma) - pnorm(u1-len[i], sd=sigma)) totvalue <- if(i == 1L) value else (value + totvalue) } } dens <- switch(atype, window = im(totvalue, w$xcol, w$yrow, unitname=ux), points = totvalue) }, C = { #' C implementation of the above xs <- x$ends$x0 ys <- x$ends$y0 xp <- as.numeric(as.vector(xx)) yp <- as.numeric(as.vector(yy)) np <- length(xp) if(is.null(weights)) { #' unweighted z <- .C(SE_segdens, sigma = as.double(sigma), ns = as.integer(n), xs = as.double(xs), ys = as.double(ys), alps = as.double(ang), lens = as.double(len), np = 
as.integer(np), xp = as.double(xp), yp = as.double(yp), z = as.double(numeric(np)), PACKAGE="spatstat.explore") } else { #' weighted z <- .C(SE_segwdens, sigma = as.double(sigma), ns = as.integer(n), xs = as.double(xs), ys = as.double(ys), alps = as.double(ang), lens = as.double(len), ws = as.double(weights), np = as.integer(np), xp = as.double(xp), yp = as.double(yp), z = as.double(numeric(np)), PACKAGE="spatstat.explore") } dens <- switch(atype, window = im(z$z, w$xcol, w$yrow, unitname=ux), points = z$z) }, FFT = { Y <- pixellate(x, ..., weights=weights, DivideByPixelArea=TRUE) dens <- blur(Y, sigma, normalise=edge, bleed=FALSE, ...) if(atype == "points") dens <- dens[atY, drop=FALSE] }) if(edge && method != "FFT") { edg <- second.moment.calc(midpoints.psp(x), sigma, what="edge", ...) switch(atype, window = { dens <- eval.im(dens/edg) }, points = { edgY <- edg[atY, drop=FALSE] dens <- dens/edgY }) } if(atype == "window") dens <- dens[x$window, drop=FALSE] attr(dens, "sigma") <- sigma return(dens) } spatstat.explore/R/hasenvelope.R0000644000176200001440000000120514611073310016421 0ustar liggesusers#' #' hasenvelope.R #' #' A simple class of objects which contain additional envelope data #' #' $Revision: 1.1 $ $Date: 2015/10/05 06:20:31 $ hasenvelope <- function(X, E=NULL) { if(inherits(E, "envelope")) { attr(X, "envelope") <- E class(X) <- c("hasenvelope", class(X)) } return(X) } print.hasenvelope <- function(x, ...) { NextMethod("print") splat("[Object contains simulation envelope data]") return(invisible(NULL)) } envelope.hasenvelope <- function(Y, ..., Yname=NULL) { if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) E <- attr(Y, "envelope") return(envelope(E, ..., Yname=Yname)) } spatstat.explore/R/Kscaled.R0000644000176200001440000001274014611073307015472 0ustar liggesusers# # Kscaled.R Estimation of K function for locally-scaled process # # $Revision: 1.18 $ $Date: 2022/05/23 00:14:56 $ # "Lscaled" <- function(...) { K <- Kscaled(...) L <- eval.fv(sqrt(pmax.int(K,0)/pi)) # relabel the fv object L <- rebadge.fv(L, quote(L[scaled](r)), c("L","scaled")) attr(L, "labl") <- attr(K, "labl") attr(L, "dangerous") <- attr(K, "dangerous") return(L) } "Kscaled"<- function (X, lambda=NULL, ..., r = NULL, breaks = NULL, rmax = 2.5, correction=c("border", "isotropic", "translate"), renormalise=FALSE, normpower=1, sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") # rfixed <- !missing(r) || !missing(breaks) ## determine basic parameters W <- X$window npts <- X$n areaW <- area(W) halfdiameter <- diameter(W)/2 ## match corrections correction.given <- !missing(correction) && !is.null(correction) correction <- pickoption("correction", correction, c(none="none", border="border", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) # best.wanted <- ("best" %in% correction) correction <- implemented.for.K(correction, W$type, correction.given) ########################################################### ## DETERMINE WEIGHTS AND VALIDATE ## a <- resolve.lambda(X, lambda=lambda, sigma=sigma, varcov=varcov, ..., check=TRUE) lambda <- a$lambda danger <- a$danger dangerous <- a$dangerous check.nvector(lambda, npts, vname="lambda") if(renormalise) { ## renormalise. 
Here we only need half the power ;-) check.1.real(normpower) stopifnot(normpower %in% 1:2) renorm.factor <- (areaW/sum(1/lambda))^(normpower/2) lambda <- lambda/renorm.factor } ## Calculate range of r values using max lambda sra <- sqrt(range(lambda)) minrescale <- sra[1] maxrescale <- sra[2] ## convert arguments to absolute distances absr <- if(!is.null(r)) r/maxrescale else NULL absrmaxdefault <- min(rmax.rule("K", W), rmax/maxrescale) absbreaks <- if(!is.null(breaks)) scalardilate(breaks, 1/maxrescale) else NULL ## determine absolute distances absbreaks <- handle.r.b.args(absr, absbreaks, W, rmaxdefault=absrmaxdefault) absr <- absbreaks$r ## convert to rescaled distances breaks <- scalardilate(absbreaks, maxrescale) r <- breaks$r rmax <- breaks$max ## recommended range of scaled r values alim <- c(0, min(rmax, maxrescale * absrmaxdefault)) rthresh <- minrescale * halfdiameter ## maximum absolute distance ever needed maxabsdist <- min(rmax/minrescale, halfdiameter) ## this will be the output data frame K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") K <- fv(K, "r", quote(K[scaled](r)), "theo", , alim, c("r","{%s[%s]^{pois}}(r)"), desc, fname=c("K", "scaled")) ## identify all relevant close pairs needXI <- any(correction %in% c("translate", "isotropic")) close <- closepairs(X, maxabsdist, what=if(needXI) "all" else "ijd") I <- close$i J <- close$j ## locally-scaled distances sqrtLambda <- sqrt(lambda) lamIJ <- (sqrtLambda[I] + sqrtLambda[J])/2 absDIJ <- close$d DIJ <- absDIJ * lamIJ ## first point of each pair XI <- if(needXI) ppp(close$xi, close$yi, window=W, check=FALSE) else NULL if(any(correction == "none")) { ## uncorrected! For demonstration purposes only! wh <- whist(DIJ, breaks$val) # no weights Kun <- cumsum(wh)/npts K <- bind.fv(K, data.frame(un=Kun), "{hat(%s)[%s]^{un}}(r)", "uncorrected estimate of %s", "un") } if(any(correction == "border")) { ## border method ## Compute SCALED distances to boundary b <- bdist.points(X) * sqrtLambda bI <- b[I] ## apply reduced sample algorithm to scaled distances RS <- Kount(DIJ, bI, b, breaks) Kb <- RS$numerator/RS$denom.count Kb[r > rthresh] <- NA K <- bind.fv(K, data.frame(border=Kb), "{hat(%s)[%s]^{bord}}(r)", "border-corrected estimate of %s", "border") } if(any(correction == "translate")) { ## translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) wh <- whist(DIJ, breaks$val, edgewt) Ktrans <- cumsum(wh)/npts Ktrans[r >= rthresh] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), "{hat(%s)[%s]^{trans}}(r)", "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { ## Ripley isotropic correction (using UN-SCALED distances) edgewt <- edge.Ripley(XI, matrix(absDIJ, ncol=1)) wh <- whist(DIJ, breaks$val, edgewt) Kiso <- cumsum(wh)/npts Kiso[r >= rthresh] <- NA K <- bind.fv(K, data.frame(iso=Kiso), "{hat(%s)[%s]^{iso}}(r)", "Ripley isotropic correction estimate of %s", "iso") } ## default plot will display all edge corrections formula(K) <- . 
~ r nama <- rev(colnames(K)) fvnames(K, ".") <- nama[!(nama %in% c("r", "rip", "ls"))] ## unitname(K) <- c("normalised unit", "normalised units") if(danger) attr(K, "dangerous") <- dangerous return(K) } spatstat.explore/R/fryplot.R0000644000176200001440000000502114611073310015607 0ustar liggesusers# # fryplot.R # # $Revision: 1.19 $ $Date: 2024/02/04 08:04:51 $ # fryplot <- function(X, ..., width=NULL, from=NULL, to=NULL, axes=FALSE) { Xname <- short.deparse(substitute(X)) X <- as.ppp(X) b <- as.rectangle(X) halfspan <- with(b, c(diff(xrange), diff(yrange))) if(!is.null(width)) { halfwidth <- ensure2vector(width)/2 halfspan <- pmin.int(halfspan, halfwidth) } bb <- owinInternalRect(c(-1,1) * halfspan[1L], c(-1,1) * halfspan[2L]) Y <- frypoints(X, from=from, to=to, dmax=diameter(bb))[bb] dont.complain.about(Y) do.call(plot.ppp, resolve.defaults(list(x=quote(Y)), list(...), list(main=paste("Fry plot of", Xname)))) if(axes) { lines(c(0,0), c(-1,1) * halfspan[1L]) lines(c(-1,1) * halfspan[2L], c(0,0)) } return(invisible(NULL)) } frypoints <- function(X, from=NULL, to=NULL, dmax=Inf) { X <- as.ppp(X) b <- as.rectangle(X) bb <- owinInternalRect(c(-1,1) * diff(b$xrange), c(-1,1) * diff(b$yrange)) n <- X$n xx <- X$x yy <- X$y ## determine (dx, dy) for all relevant pairs if(is.null(from) && is.null(to)) { if(is.infinite(dmax)) { dx <- outer(xx, xx, "-") dy <- outer(yy, yy, "-") notsame <- matrix(TRUE, n, n) diag(notsame) <- FALSE DX <- as.vector(dx[notsame]) DY <- as.vector(dy[notsame]) I <- row(notsame)[notsame] J <- col(notsame)[notsame] } else { cl <- closepairs(X, dmax) DX <- cl$dx DY <- cl$dy I <- cl$j ## sic: I is the index of the 'TO' element J <- cl$i ## sic } } else { seqn <- seq_len(n) from <- if(is.null(from)) seqn else seqn[from] to <- if(is.null(to)) seqn else seqn[to] if(is.infinite(dmax)) { dx <- outer(xx[to], xx[from], "-") dy <- outer(yy[to], yy[from], "-") notsame <- matrix(TRUE, n, n) diag(notsame) <- FALSE notsame <- notsame[to, from, drop=FALSE] DX <- as.vector(dx[notsame]) DY <- as.vector(dy[notsame]) I <- row(notsame)[notsame] J <- col(notsame)[notsame] } else { cl <- crosspairs(X[from], X[to], dmax) ok <- with(cl, from[i] != to[j]) DX <- cl$dx[ok] DY <- cl$dy[ok] I <- cl$j[ok] J <- cl$i[ok] } } ## form into point pattern Fry <- ppp(DX, DY, window=bb, check=FALSE) if(is.marked(X)) { marx <- as.data.frame(marks(X)) marxto <- if(is.null(to)) marx else marx[to, ,drop=FALSE] marks(Fry) <- marxto[I, ] } attr(Fry, "indices") <- list(I=I, J=J) return(Fry) } spatstat.explore/R/dclftest.R0000644000176200001440000003421314611073310015725 0ustar liggesusers# # dclftest.R # # $Revision: 1.47 $ $Date: 2023/02/28 01:57:06 $ # # Monte Carlo tests for CSR (etc) # # clf.test <- function(...) { # .Deprecated("dclf.test", package="spatstat") # dclf.test(...) 
# } dclf.test <- function(X, ..., alternative=c("two.sided", "less", "greater"), rinterval=NULL, leaveout=1, scale=NULL, clamp=FALSE, interpolate=FALSE) { Xname <- short.deparse(substitute(X)) envelopeTest(X, ..., exponent=2, alternative=alternative, rinterval=rinterval, leaveout=leaveout, scale=scale, clamp=clamp, interpolate=interpolate, Xname=Xname) } mad.test <- function(X, ..., alternative=c("two.sided", "less", "greater"), rinterval=NULL, leaveout=1, scale=NULL, clamp=FALSE, interpolate=FALSE) { Xname <- short.deparse(substitute(X)) envelopeTest(X, ..., exponent=Inf, alternative=alternative, rinterval=rinterval, leaveout=leaveout, scale=scale, clamp=clamp, interpolate=interpolate, Xname=Xname) } ## measure deviation of summary function ## leaveout = 0: typically 'ref' is exact theoretical value ## Compute raw deviation. ## leaveout = 1: 'ref' is mean of simulations *and* observed. ## Use algebra to compute leave-one-out deviation. ## leaveout = 2: 'ref' is mean of simulations ## Use algebra to compute leave-two-out deviation. Deviation <- function(x, ref, leaveout, n, xi=x) { if(leaveout == 0) return(x-ref) if(leaveout == 1) return((x-ref) * (n+1)/n) jackmean <- (n * ref - xi)/(n-1) return(x - jackmean) } ## Evaluate signed or absolute deviation, ## taking account of alternative hypothesis and possible scaling ## (Large positive values always favorable to alternative) RelevantDeviation <- local({ positivepart <- function(x) { d <- dim(x) y <- pmax(0, x) if(!is.null(d)) y <- matrix(y, d[1L], d[2L]) return(y) } negativepart <- function(x) positivepart(-x) RelevantDeviation <- function(x, alternative, clamp=FALSE, scaling=NULL) { if(!is.null(scaling)) x <- x/scaling switch(alternative, two.sided = abs(x), less = if(clamp) negativepart(x) else -x, greater = if(clamp) positivepart(x) else x) } RelevantDeviation }) ## workhorse function envelopeTest <- function(X, ..., exponent=1, alternative=c("two.sided", "less", "greater"), rinterval=NULL, leaveout=1, scale=NULL, clamp=FALSE, tie.rule=c("randomise","mean"), interpolate=FALSE, save.interpolant = TRUE, save.envelope = savefuns || savepatterns, savefuns = FALSE, savepatterns = FALSE, Xname=NULL, badXfatal=TRUE, verbose=TRUE) { if(is.null(Xname)) Xname <- short.deparse(substitute(X)) tie.rule <- match.arg(tie.rule) alternative <- match.arg(alternative) if(!(leaveout %in% 0:2)) stop("Argument leaveout should equal 0, 1 or 2") force(save.envelope) check.1.real(exponent) explain.ifnot(exponent >= 0) deviationtype <- switch(alternative, two.sided = "absolute", greater = if(clamp) "positive" else "signed", less = if(clamp) "negative" else "signed") deviationblurb <- paste(deviationtype, "deviation") ## compute or extract simulated functions X <- envelope(X, ..., savefuns=TRUE, savepatterns=savepatterns, Yname=Xname, verbose=verbose) Y <- attr(X, "simfuns") ## extract values r <- with(X, .x) obs <- with(X, .y) sim <- as.matrix(as.data.frame(Y))[, -1L] nsim <- ncol(sim) nr <- length(r) ## choose function as reference has.theo <- ("theo" %in% names(X)) use.theo <- identical(attr(X, "einfo")$use.theory, TRUE) if(use.theo && !has.theo) warning("No theoretical function available; use.theory ignored") if(use.theo && has.theo) { theo.used <- TRUE reference <- with(X, theo) leaveout <- 0 } else { theo.used <- FALSE if(leaveout == 2) { ## use sample mean of simulations only reference <- apply(sim, 1L, mean, na.rm=TRUE) } else { ## use sample mean of simulations *and* observed reference <- apply(cbind(sim, obs), 1L, mean, na.rm=TRUE) } } ## determine 
interval of r values for computation if(is.null(rinterval)) { rinterval <- range(r) ok <- rep(TRUE, nr) first <- 1L } else { #' argument 'rinterval' specified check.range(rinterval) if(max(r) < rinterval[2L]) { oldrinterval <- rinterval rinterval <- intersect.ranges(rinterval, range(r), fatal=FALSE) if(is.null(rinterval)) stop(paste("The specified rinterval", prange(oldrinterval), "has empty intersection", "with the range of r values", prange(range(r)), "computed by the summary function"), call.=FALSE) if(verbose) warning(paste("The interval", prange(oldrinterval), "is too large for the available data;", "it has been trimmed to", prange(rinterval))) } ok <- (rinterval[1L] <= r & r <= rinterval[2L]) first <- min(which(ok)) } #' check for valid function values, and possibly adjust rinterval #' observed function values badr <- !is.finite(obs) if(badXfatal && all(badr)) stop("Observed function values are all infinite, NA or NaN", call.=FALSE) if(any(badr[ok])) { if(badr[first] && !any(badr[ok][-1L])) { ## ditch smallest r value (usually zero) ok[first] <- FALSE first <- first + 1L rmin <- r[first] if(verbose) warning(paste("Some function values were infinite, NA or NaN", "at distance r =", paste0(rinterval[1L], ";"), "lower limit of r interval was reset to", rmin, summary(unitname(X))$plural)) rinterval[1] <- rmin } else { ## problem rbadmax <- paste(max(r[badr]), summary(unitname(X))$plural) warning(paste("Some function values were infinite, NA or NaN", "at distances r up to", paste0(rbadmax, "."), "Consider specifying a shorter", sQuote("rinterval"))) } } #' simulated function values badsim <- matcolall(!is.finite(sim[ok,,drop=FALSE])) if(all(badsim)) stop(paste("Simulated function values are all infinite, NA or NaN.", "Check whether simulated patterns are empty"), call.=FALSE) if(any(badsim)) { warning(paste("In", sum(badsim), "out of", length(badsim), "simulations,", "the simulated function values were infinite, NA or NaN", "at every distance r.", "Check whether some simulated patterns are empty"), call.=FALSE) } #' finally trim data rok <- r[ok] obs <- obs[ok] sim <- sim[ok, ] reference <- reference[ok] nr <- sum(ok) if(nr == 0) { ## rinterval is very short: pick nearest r value ok <- which.min(abs(r - mean(rinterval))) nr <- 1L } ## determine rescaling if any if(is.null(scale)) { scaling <- NULL } else if(is.function(scale)) { scaling <- scale(rok) sname <- "scale(r)" ans <- check.nvector(scaling, nr, things="values of r", fatal=FALSE, vname=sname) if(!ans) stop(attr(ans, "whinge"), call.=FALSE) if(any(bad <- (scaling <= 0))) { ## issue a warning unless this only happens at r=0 if(any(bad[rok > 0])) warning(paste("Some values of", sname, "were negative or zero:", "scale was reset to 1 for these values"), call.=FALSE) scaling[bad] <- 1 } } else stop("Argument scale should be a function") ## compute deviations rawdevDat <- Deviation(obs, reference, leaveout, nsim, sim[,1L]) rawdevSim <- Deviation(sim, reference, leaveout, nsim) ## evaluate signed/absolute deviation relevant to alternative ddat <- RelevantDeviation(rawdevDat, alternative, clamp, scaling) dsim <- RelevantDeviation(rawdevSim, alternative, clamp, scaling) if(!all(is.finite(ddat))) warning("Some deviation values were Inf, NA or NaN") if(!all(is.finite(dsim))) warning("Some simulated deviations were Inf, NA or NaN") ## compute test statistic if(is.infinite(exponent)) { ## MAD devdata <- max(ddat,na.rm=TRUE) devsim <- apply(dsim, 2, max, na.rm=TRUE) names(devdata) <- "mad" testname <- paste("Maximum", deviationblurb, "test") 
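    ## the MAD statistic: devdata is the supremum of the relevant deviation
    ## over the r interval for the data; devsim holds the same statistic
    ## for each simulated curve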
statisticblurb <- paste("Maximum", deviationblurb) } else { L <- if(nr > 1) diff(rinterval) else 1 if(exponent == 2) { ## Cramer-von Mises ddat2 <- if(clamp) ddat^2 else (sign(ddat) * ddat^2) dsim2 <- if(clamp) dsim^2 else (sign(dsim) * dsim^2) devdata <- L * mean(ddat2, na.rm=TRUE) devsim <- L * .colMeans(dsim2, nr, nsim, na.rm=TRUE) names(devdata) <- "u" testname <- "Diggle-Cressie-Loosmore-Ford test" statisticblurb <- paste("Integral of squared", deviationblurb) } else if(exponent == 1) { ## integral absolute deviation devdata <- L * mean(ddat, na.rm=TRUE) devsim <- L * .colMeans(dsim, nr, nsim, na.rm=TRUE) names(devdata) <- "L1" testname <- paste("Integral", deviationblurb, "test") statisticblurb <- paste("Integral of", deviationblurb) } else { ## general p if(clamp) { ddatp <- ddat^exponent dsimp <- dsim^exponent } else { ddatp <- sign(ddat) * (abs(ddat)^exponent) dsimp <- sign(dsim) * (abs(dsim)^exponent) } devdata <- L * mean(ddatp, na.rm=TRUE) devsim <- L * .colMeans(dsimp, nr, nsim, na.rm=TRUE) names(devdata) <- "Lp" testname <- paste("Integrated", ordinal(exponent), "Power Deviation test") statisticblurb <- paste("Integral of", ordinal(exponent), "power of", deviationblurb) } } if(!interpolate) { ## standard Monte Carlo test ## compute rank and p-value datarank <- sum(devdata < devsim, na.rm=TRUE) + 1 nties <- sum(devdata == devsim, na.rm=TRUE) if(nties > 0) { tierank <- switch(tie.rule, mean = nties/2, randomise = sample(1:nties, 1L)) datarank <- datarank + tierank if(verbose) message("Ties were encountered") } pvalue <- datarank/(nsim+1) ## bookkeeping statistic <- data.frame(devdata, rank=datarank) colnames(statistic)[1L] <- names(devdata) } else { ## Dao-Genton style interpolation fhat <- density(devsim, na.rm=TRUE) pvalue <- with(fhat, { if(max(x) <= devdata) 0 else mean(y[x >= devdata]) * (max(x) - devdata) }) statistic <- data.frame(devdata) colnames(statistic)[1L] <- names(devdata) nties <- 0 } e <- attr(X, "einfo") nullmodel <- if(identical(e$csr, TRUE)) "CSR" else if(!is.null(e$simtype)) { switch(e$simtype, csr = "CSR", rmh = paste("fitted", if(identical(e$pois, TRUE)) "Poisson" else "Gibbs", "model"), kppm = "fitted cluster model", expr = "model simulated by evaluating expression", func = "model simulated by evaluating function", list = "model simulated by drawing patterns from a list", "unrecognised model") } else "unrecognised model" fname <- short.deparse(attr(X, "ylab")) uname <- with(summary(unitname(X)), if(!vanilla) paste(plural, explain) else NULL) testtype <- paste0(if(interpolate) "Interpolated " else NULL, "Monte Carlo") scaleblurb <- if(is.null(scale)) NULL else paste("Scale function:", paste(short.deparse(scale), collapse=" ")) refblurb <- if(theo.used) "theoretical" else "sample mean" leaveblurb <- if(leaveout == 0) paste("observed minus", refblurb) else if(leaveout == 1) "leave-one-out" else "leave-two-out" testname <- c(paste(testname, "of", nullmodel), paste(testtype, "test based on", nsim, "simulations", e$constraints), paste("Summary function:", fname), paste("Reference function:", refblurb), paste("Alternative:", alternative), paste("Interval of distance values:", prange(rinterval), uname), scaleblurb, paste("Test statistic:", statisticblurb), paste("Deviation =", leaveblurb) ) result <- structure(list(statistic = statistic, p.value = pvalue, method = testname, data.name = e$Yname), class="htest") attr(result, "rinterval") <- rinterval if(save.interpolant && interpolate) attr(result, "density") <- fhat if(save.envelope) { result <- 
hasenvelope(result, X) attr(result, "statistics") <- list(data=devdata, sim=devsim) attr(result, "info") <- list(exponent=exponent, alternative=alternative, nties=nties, leaveout=leaveout, interpolate=interpolate, scale=scale, clamp=clamp, tie.rule=tie.rule, use.theo=use.theo) } return(result) } spatstat.explore/R/rho2hat.R0000644000176200001440000002335614611073310015472 0ustar liggesusers# # rho2hat.R # # Relative risk for pairs of covariate values # # $Revision: 1.31 $ $Date: 2022/05/23 02:33:06 $ # rho2hat <- function(object, cov1, cov2, ..., method=c("ratio", "reweight")) { cov1name <- short.deparse(substitute(cov1)) cov2name <- short.deparse(substitute(cov2)) callstring <- short.deparse(sys.call()) method <- match.arg(method) # validate model if(is.ppp(object) || is.quad(object)) { model <- exactppm(object) reference <- "area" X <- object modelcall <- NULL } else if(inherits(object, "ppm")) { model <- object reference <- "model" if(!requireNamespace("spatstat.model")) stop("The package spatstat.model is required when 'object' is a fitted model", call.=FALSE) X <- spatstat.model::data.ppm(model) modelcall <- model$call if(is.null(spatstat.model::getglmfit(model))) model <- update(model, forcefit=TRUE) } else stop("object should be a point pattern or a point process model") # interpret string "x" or "y" as a coordinate function getxyfun <- function(s) { switch(s, x = { function(x,y) { x } }, y = { function(x,y) { y } }, stop(paste("Unrecognised covariate name", sQuote(s)))) } if(is.character(cov1) && length(cov1) == 1) { cov1name <- cov1 cov1 <- getxyfun(cov1name) } if(is.character(cov2) && length(cov2) == 1) { cov2name <- cov2 cov2 <- getxyfun(cov2name) } if( (cov1name == "x" && cov2name == "y") || (cov1name == "y" && cov2name == "x")) { # spatial relative risk isxy <- TRUE needflip <- (cov1name == "y" && cov2name == "x") if(needflip) X <- flipxy(X) switch(method, ratio = { # ratio of smoothed intensity estimates den <- density(X, ...) 
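             # 'den' is a kernel estimate of the intensity of X; its bandwidth
             # is reused below to blur the reference intensity, and rho is the
             # pixelwise ratio den/lam (or just den when the reference is 'area')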
sigma <- attr(den, "sigma") varcov <- attr(den, "varcov") W <- as.owin(den) if(!needflip) { lambda <- predict(model, locations=W) } else { lambda <- flipxy(predict(model, locations=flipxy(W))) } rslt <- switch(reference, area = { den }, model = { lam <- blur(lambda, sigma=sigma, varcov=varcov, normalise=TRUE) eval.im(den/lam) }) }, reweight = { ## smoothed point pattern with weights = 1/reference W <- as.owin(X) W <- do.call.matched(as.mask, list(w=quote(W), ...)) if(!needflip) { lambda <- predict(model, locations=W) } else { lambda <- flipxy(predict(model, locations=flipxy(W))) } gstarX <- switch(reference, area = { rep.int(area(W), npoints(X)) }, model = { lambda[X] }) rslt <- density(X, ..., weights=1/gstarX) sigma <- attr(rslt, "sigma") varcov <- attr(rslt, "varcov") }) Z12points <- X r1 <- W$xrange r2 <- W$yrange lambda <- lambda[] } else { # general case isxy <- FALSE # harmonise covariates if(is.function(cov1) && is.im(cov2)) { cov1 <- as.im(cov1, W=cov2) } else if(is.im(cov1) && is.function(cov2)) { cov2 <- as.im(cov2, W=cov1) } # evaluate each covariate at data points and at pixels stuff1 <- spatialCovariateEvidence(model, cov1) stuff2 <- spatialCovariateEvidence(model, cov2) # unpack values1 <- stuff1$values values2 <- stuff2$values # covariate values at each data point Z1X <- values1$ZX Z2X <- values2$ZX # covariate values at each pixel Z1values <- values1$Zvalues Z2values <- values2$Zvalues # model intensity lambda <- values1$lambda # ranges of each covariate r1 <- range(Z1X, Z1values, finite=TRUE) r2 <- range(Z2X, Z2values, finite=TRUE) scal <- function(x, r) { (x - r[1])/diff(r) } # scatterplot coordinates Z12points <- ppp(scal(Z1X, r1), scal(Z2X, r2), c(0,1), c(0,1)) Z12pixels <- ppp(scal(Z1values, r1), scal(Z2values, r2), c(0,1), c(0,1)) # normalising constants # nX <- length(Z1X) npixel <- length(lambda) areaW <- area(Window(X)) pixelarea <- areaW/npixel baseline <- if(reference == "area") rep.int(1, npixel) else lambda wts <- baseline * pixelarea dont.complain.about(Z12pixels) switch(method, ratio = { # estimate intensities fhat <- density(Z12points, ...) sigma <- attr(fhat, "sigma") varcov <- attr(fhat, "varcov") ghat <- do.call(density.ppp, resolve.defaults(list(x=quote(Z12pixels), weights=quote(wts)), list(...), list(sigma=sigma, varcov=varcov))) # compute ratio of smoothed densities rslt <- eval.im(fhat/ghat) }, reweight = { # compute smoothed intensity with weight = 1/reference ghat <- density(Z12pixels, weights=wts, ...) rslt <- density(Z12points, weights=1/ghat[Z12points], ...) sigma <- attr(rslt, "sigma") varcov <- attr(rslt, "varcov") }) } # add scale and label info attr(rslt, "stuff") <- list(isxy=isxy, cov1=cov1, cov2=cov2, cov1name=cov1name, cov2name=cov2name, r1=r1, r2=r2, reference=reference, lambda=lambda, modelcall=modelcall, callstring=callstring, Z12points=Z12points, sigma=sigma, varcov=varcov) class(rslt) <- c("rho2hat", class(rslt)) rslt } plot.rho2hat <- function(x, ..., do.points=FALSE) { xname <- short.deparse(substitute(x)) s <- attr(x, "stuff") # resolve "..." 
arguments rd <- resolve.defaults(list(...), list(add=FALSE, axes=!s$isxy, xlab=s$cov1name, ylab=s$cov2name)) # plot image plotparams <- graphicsPars("plot") do.call.matched(plot.im, resolve.defaults(list(x=quote(x), axes=FALSE), list(...), list(main=xname, ribargs=list(axes=TRUE))), extrargs=c(plotparams, "add", "zlim", "breaks")) # add axes if(rd$axes) { axisparams <- graphicsPars("axis") Axis <- function(..., extrargs=axisparams) { do.call.matched(graphics::axis, resolve.defaults(list(...)), extrargs=extrargs) } if(s$isxy) { # for (x,y) plots the image is at the correct physical scale xr <- x$xrange yr <- x$yrange spak <- 0.05 * max(diff(xr), diff(yr)) Axis(side=1, ..., at=pretty(xr), pos=yr[1] - spak) Axis(side=2, ..., at=pretty(yr), pos=xr[1] - spak) } else { # for other plots the image was scaled to the unit square rx <- s$r1 ry <- s$r2 px <- pretty(rx) py <- pretty(ry) Axis(side=1, labels=px, at=(px - rx[1])/diff(rx), ...) Axis(side=2, labels=py, at=(py - ry[1])/diff(ry), ...) } title(xlab=rd$xlab) title(ylab=rd$ylab) } if(do.points) { poy <- s$Z12points dont.complain.about(poy) do.call.matched(plot.ppp, resolve.defaults(list(x=quote(poy), add=TRUE), list(...)), extrargs=c("pch", "col", "cols", "bg", "cex", "lwd", "lty")) } invisible(NULL) } print.rho2hat <- function(x, ...) { s <- attr(x, "stuff") cat("Scatterplot intensity estimate (class rho2hat)\n") cat(paste("for the covariates", s$cov1name, "and", s$cov2name, "\n")) switch(s$reference, area=cat("Function values are absolute intensities\n"), model={ cat("Function values are relative to fitted model\n") print(s$modelcall) }) cat(paste("Call:", s$callstring, "\n")) if(s$isxy) { cat("Obtained by spatial smoothing of original data\n") cat("Smoothing parameters used by density.ppp:\n") } else { cat("Obtained by transforming to the unit square and smoothing\n") cat("Smoothing parameters (on unit square) used by density.ppp:\n") } if(!is.null(s$sigma)) cat(paste("\tsigma = ", signif(s$sigma, 5), "\n")) if(!is.null(s$varcov)) { cat("\tvarcov =\n") ; print(s$varcov) } cat("Intensity values:\n") NextMethod("print") } predict.rho2hat <- function(object, ..., relative=FALSE) { if(length(list(...)) > 0) warning("Additional arguments ignored in predict.rho2hat") # extract info s <- attr(object, "stuff") reference <- s$reference #' extract images of covariate Z1 <- s$cov1 Z2 <- s$cov2 if(!is.im(Z1)) Z1 <- as.im(Z1, Window(object)) if(!is.im(Z2)) Z2 <- as.im(Z2, Window(object)) #' rescale to [0,1] Z1 <- scaletointerval(Z1, xrange=s$r1) Z2 <- scaletointerval(Z2, xrange=s$r2) # extract pairs of covariate values ZZ <- pairs(Z1, Z2, plot=FALSE) # apply rho to Z YY <- safelookup(object, ppp(ZZ[,1], ZZ[,2], c(0,1), c(0,1)), warn=FALSE) # reform as image Y <- Z1 Y[] <- YY # adjust to reference baseline if(!(relative || reference == "area")) { lambda <- s$lambda Y <- Y * lambda } return(Y) } spatstat.explore/R/SmoothHeat.R0000644000176200001440000000464114700374621016202 0ustar liggesusers#' #' SmoothHeat.R #' #' Nadaraya-Watson style smooth regression using diffusion #' #' Copyright (C) 2018-2024 Adrian Baddeley, Tilman Davies and Suman Rakshit #' #' $Revision: 1.3 $ $Date: 2024/10/06 01:26:29 $ SmoothHeat <- function(X, ...) { UseMethod("SmoothHeat") } SmoothHeat.im <- function(X, sigma, ...) { blurHeat(X, sigma, ...) 
} SmoothHeat.ppp <- function(X, sigma, ..., weights=NULL) { stopifnot(is.ppp(X)) stopifnot(is.marked(X)) marx <- marks(X) if(!is.vector(marx)) stop("Marks of X should be a numeric vector") marx <- as.numeric(marx) if(is.null(weights)) { numwt <- marx denwt <- NULL } else { check.nvector(weights, npoints(X), oneok=TRUE) if(length(weights) == 1) weights <- rep(weights, npoints(X)) numwt <- marx * weights denwt <- weights } Y <- unmark(X) numer <- densityHeat(Y, sigma, weights=numwt, ...) denom <- densityHeat(Y, sigma, weights=denwt, ...) return(numer/denom) } bw.SmoothHeatppp <- function(X, ..., weights=NULL, srange=NULL, ns=16, sigma=NULL, leaveoneout=TRUE, verbose=TRUE) { stopifnot(is.ppp(X)) stopifnot(is.marked(X)) marx <- marks(X) if(!is.vector(marx)) stop("Marks of X should be a numeric vector") marx <- as.numeric(marx) if(is.null(weights)) { numwt <- marx denwt <- NULL } else { check.nvector(weights, npoints(X), oneok=TRUE) if(length(weights) == 1) weights <- rep(weights, npoints(X)) numwt <- marx * weights denwt <- weights } #' compute weighted and unweighted intensity estimates U <- unmark(X) aNumer <- HeatEstimates.ppp(U, ..., weights=numwt, srange=srange, ns=ns, sigma=sigma, leaveoneout=leaveoneout, verbose=verbose) aDenom <- HeatEstimates.ppp(U, ..., weights=denwt, srange=srange, ns=ns, sigma=sigma, leaveoneout=leaveoneout, verbose=verbose) h <- aDenom$h hname <- aDenom$hname #' compute smoother zhat <- aNumer$lambda/aDenom$lambda #' compute least squares cross-validation criterion zobs <- matrix(marx, nrow(zhat), ncol(zhat), byrow=TRUE) CV <- rowSums((zhat - zobs)^2) iopt <- which.min(CV) result <- bw.optim(CV, h, iopt, criterion="Least squares cross-validation", hname=hname, unitname=unitname(X)) return(result) } spatstat.explore/R/Jmulti.R0000644000176200001440000001275614611073307015377 0ustar liggesusers# Jmulti.S # # Usual invocations to compute multitype J function(s) # if F and G are not required # # $Revision: 4.46 $ $Date: 2023/02/28 02:05:58 $ # # # "Jcross" <- function(X, i, j, eps=NULL, r=NULL, breaks=NULL, ..., correction=NULL) { # # multitype J function J_{ij}(r) # # X: point pattern (an object of class 'ppp') # i, j: types for which J_{i,j}(r) is calculated # eps: raster grid mesh size for distance transform # (unless specified by X$window) # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # X <- as.ppp(X) if(!is.marked(X)) stop(paste("point pattern has no", sQuote("marks"))) stopifnot(is.multitype(X)) checkspacing <- !isFALSE(list(...)$checkspacing) # marx <- marks(X, dfok=FALSE) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] # I <- (marx == i) if(sum(I) == 0) stop(paste("No points have mark = ", i)) # if(i == j){ result <- Jest(X[I], eps=eps, r=r, breaks=breaks, correction=correction, checkspacing=checkspacing) } else { J <- (marx == j) result <- Jmulti(X, I, J, eps=eps, r=r, breaks=breaks, disjoint=TRUE, correction=correction, checkspacing=checkspacing) } conserve <- attr(result, "conserve") result <- rebadge.as.crossfun(result, "J", NULL, i, j) attr(result, "conserve") <- conserve return(result) } "Jdot" <- function(X, i, eps=NULL, r=NULL, breaks=NULL, ..., correction=NULL) { # # multitype J function J_{i\dot}(r) # # X: point pattern (an object of class 'ppp') # i: mark i for which we calculate J_{i\cdot}(r) # eps: raster grid mesh size for distance transform # (unless specified by X$window) # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # X <- as.ppp(X) 
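  # J_{i.}(r) is computed via Jmulti() with J = all points,
  # i.e. as the ratio (1 - G_{i.}(r))/(1 - F(r))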
if(!is.marked(X)) stop(paste("point pattern has no", sQuote("marks"))) stopifnot(is.multitype(X)) checkspacing <- !isFALSE(list(...)$checkspacing) # marx <- marks(X, dfok=FALSE) if(missing(i)) i <- levels(marx)[1] # I <- (marx == i) if(sum(I) == 0) stop(paste("No points have mark = ", i)) J <- rep.int(TRUE, X$n) # result <- Jmulti(X, I, J, eps=eps, r=r, breaks=breaks, disjoint=FALSE, correction=correction, checkspacing=checkspacing) conserve <- attr(result, "conserve") result <- rebadge.as.dotfun(result, "J", NULL, i) attr(result, "conserve") <- conserve return(result) } "Jmulti" <- function(X, I, J, eps=NULL, r=NULL, breaks=NULL, ..., disjoint=NULL, correction=NULL) { # # multitype J function (generic engine) # # X marked point pattern (of class ppp) # # I,J logical vectors of length equal to the number of points # and identifying the two subsets of points to be # compared. # # eps: raster grid mesh size for distance transform # (unless specified by X$window) # # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # # X <- as.ppp(X) W<- X$window I <- ppsubset(X, I, "I") J <- ppsubset(X, J, "J") if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") XJ <- X[J] lambdaJ <- intensity(XJ) rmaxdefault <- rmax.rule("J", W, lambdaJ) brks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)$val FJ <- Fest(XJ, eps, breaks=brks, correction=correction, ...) GIJ <- Gmulti(X, I, J, breaks=brks, disjoint=disjoint, correction=correction, ...) rvals <- FJ$r Fnames <- names(FJ) Gnames <- names(GIJ) bothnames <- Fnames[Fnames %in% Gnames] # initialise fv object alim <- attr(FJ, "alim") fname <- c("J", "list(I,J)") Z <- fv(data.frame(r=rvals, theo=1), "r", quote(J[I,J](r)), "theo", . ~ r, alim, c("r", makefvlabel(NULL, NULL, fname, "pois")), c("distance argument r", "theoretical Poisson %s"), fname=fname, yexp=quote(J[list(I,J)](r))) # add pieces manually ratio <- function(a, b) { result <- a/b result[ b == 0 ] <- NA result } if("raw" %in% bothnames) { Jun <- ratio(1-GIJ$raw, 1-FJ$raw) Z <- bind.fv(Z, data.frame(un=Jun), makefvlabel(NULL, "hat", fname, "un"), "uncorrected estimate of %s", "un") } if("rs" %in% bothnames) { Jrs <- ratio(1-GIJ$rs, 1-FJ$rs) Z <- bind.fv(Z, data.frame(rs=Jrs), makefvlabel(NULL, "hat", fname, "rs"), "border corrected estimate of %s", "rs") } if("han" %in% Gnames && "cs" %in% Fnames) { Jhan <- ratio(1-GIJ$han, 1-FJ$cs) Z <- bind.fv(Z, data.frame(han=Jhan), makefvlabel(NULL, "hat", fname, "han"), "Hanisch-style estimate of %s", "han") } if("km" %in% bothnames) { Jkm <- ratio(1-GIJ$km, 1-FJ$km) Z <- bind.fv(Z, data.frame(km=Jkm), makefvlabel(NULL, "hat", fname, "km"), "Kaplan-Meier estimate of %s", "km") if("hazard" %in% names(GIJ) && "hazard" %in% names(FJ)) { Jhaz <- GIJ$hazard - FJ$hazard Z <- bind.fv(Z, data.frame(hazard=Jhaz), "hazard(r)", "Kaplan-Meier estimate of derivative of log(%s)") } } # set default plotting values and order nama <- names(Z) fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))]) # add other info attr(Z, "G") <- GIJ attr(Z, "F") <- FJ attr(Z, "conserve") <- attr(FJ, "conserve") unitname(Z) <- unitname(X) return(Z) } spatstat.explore/R/localpcf.R0000644000176200001440000001413514611073310015701 0ustar liggesusers# # localpcf.R # # $Revision: 1.27 $ $Date: 2022/05/21 08:53:38 $ # # localpcf <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, rvalue=NULL) { if(length(list(...)) > 0) warning("Additional arguments ignored") stopifnot(is.ppp(X)) localpcfengine(X, delta=delta, rmax=rmax, 
nr=nr, stoyan=stoyan, rvalue=rvalue) } localpcfinhom <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, lambda=NULL, sigma=NULL, varcov=NULL, update=TRUE, leaveoneout=TRUE, rvalue=NULL) { stopifnot(is.ppp(X)) a <- resolve.lambda(X, lambda, ..., sigma=sigma, varcov=varcov, update=update, leaveoneout=leaveoneout) result <- localpcfengine(X, ..., delta=delta, rmax=rmax, nr=nr, stoyan=stoyan, lambda=a$lambda, rvalue=rvalue) if(is.null(rvalue) && a$danger) attr(result, "dangerous") <- a$dangerous return(result) } localpcfengine <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, lambda=NULL, rvalue=NULL) { if(!is.null(rvalue)) rmax <- rvalue m <- localpcfmatrix(X, delta=delta, rmax=rmax, nr=nr, stoyan=stoyan, lambda=lambda) r <- attr(m, "r") if(!is.null(rvalue)) { nr <- length(r) if(r[nr] != rvalue) stop("Internal error - rvalue not attained") return(as.numeric(m[nr,])) } delta <- attr(m, "delta") nX <- npoints(X) if(nX == 0) { df <- data.frame(r=r, theo=rep.int(1, length(r))) nama <- desc <- labl <- NULL } else { # border correction dbord <- bdist.points(X) m[r[row(m)] > dbord[col(m)]] <- NA # df <- data.frame(m, r=r, theo=rep.int(1, length(r))) icode <- unlist(lapply(seq_len(nX), numalign, nmax=nX)) nama <- paste("est", icode, sep="") desc <- paste("estimate of %s for point", icode) labl <- paste("%s[", icode, "](r)", sep="") } names(df) <- c(nama, "r", "theo") desc <- c(desc, "distance argument r", "theoretical Poisson %s") labl <- c(labl, "r", "%s[pois](r)") # create fv object g <- fv(df, "r", quote(localg(r)), "theo", , c(0, max(r)), labl, desc, fname="localg") # default is to display them all formula(g) <- . ~ r fvnames(g, ".") <- names(df)[names(df) != "r"] unitname(g) <- unitname(X) attr(g, "delta") <- delta attr(g, "correction") <- "border" return(g) } localpcfmatrix <- function(X, i=seq_len(npoints(X)), ..., lambda = NULL, delta=NULL, rmax=NULL, nr=512, stoyan=0.15) { missi <- missing(i) weighted <- !is.null(lambda) nX <- npoints(X) nY <- if(missi) nX else length(seq_len(nX)[i]) W <- as.owin(X) lambda.ave <- nX/area(W) if(is.null(delta)) delta <- stoyan/sqrt(lambda.ave) if(is.null(rmax)) rmax <- rmax.rule("K", W, lambda.ave) # if(nX == 0 || nY == 0) { out <- matrix(0, nr, 0) } else { # sort points in increasing order of x coordinate oX <- fave.order(X$x) Xsort <- X[oX] idXsort <- (1:nX)[oX] if(weighted) { lambdaXsort <- lambda[oX] weightXsort <- 1/lambdaXsort } if(missi) { Y <- X oY <- oX Ysort <- Xsort idYsort <- idXsort } else { # i is some kind of index Y <- X[i] idY <- (1:nX)[i] oY <- fave.order(Y$x) Ysort <- Y[oY] idYsort <- idY[oY] } nY <- npoints(Y) force(nr) # call C if(!weighted) { zz <- .C(SE_locpcfx, nn1 = as.integer(nY), x1 = as.double(Ysort$x), y1 = as.double(Ysort$y), id1 = as.integer(idYsort), nn2 = as.integer(nX), x2 = as.double(Xsort$x), y2 = as.double(Xsort$y), id2 = as.integer(idXsort), nnr = as.integer(nr), rmaxi=as.double(rmax), del=as.double(delta), pcf=as.double(double(nr * nY)), PACKAGE="spatstat.explore") } else { zz <- .C(SE_locWpcfx, nn1 = as.integer(nY), x1 = as.double(Ysort$x), y1 = as.double(Ysort$y), id1 = as.integer(idYsort), nn2 = as.integer(nX), x2 = as.double(Xsort$x), y2 = as.double(Xsort$y), id2 = as.integer(idXsort), w2 = as.double(weightXsort), nnr = as.integer(nr), rmaxi=as.double(rmax), del=as.double(delta), pcf=as.double(double(nr * nY)), PACKAGE="spatstat.explore") } out <- matrix(zz$pcf, nr, nY) # reorder columns to match original out[, oY] <- out # rescale out <- out/(2 * pi * if(!weighted) lambda.ave else 1) } # 
dress up attr(out, "r") <- seq(from=0, to=rmax, length.out=nr) attr(out, "delta") <- delta class(out) <- c("localpcfmatrix", class(out)) return(out) } print.localpcfmatrix <- function(x, ...) { cat("Matrix of local pair correlation estimates\n") nc <- ncol(x) nr <- nrow(x) cat(paste("pcf estimates for", nc, ngettext(nc, "point", "points"), "\n")) rval <- attr(x, "r") cat(paste("r values from 0 to", max(rval), "in", nr, "steps\n")) return(invisible(NULL)) } plot.localpcfmatrix <- function(x, ...) { xname <- short.deparse(substitute(x)) force(x) rval <- attr(x, "r") do.call(matplot, resolve.defaults(list(rval, quote(x)), list(...), list(type="l", main=xname, xlab="r", ylab="pair correlation"))) } "[.localpcfmatrix" <- function(x, i, ...) { r <- attr(x, "r") delta <- attr(x, "delta") class(x) <- "matrix" if(missing(i)) { x <- x[ , ...] } else { x <- x[i, ...] if(is.matrix(i)) return(x) r <- r[i] } if(!is.matrix(x)) x <- matrix(x, nrow=length(r)) attr(x, "r") <- r attr(x, "delta") <- delta class(x) <- c("localpcfmatrix", class(x)) return(x) } spatstat.explore/R/boyce.R0000644000176200001440000000275514611073307015232 0ustar liggesusers#' boyce.R #' #' Discrete and continuous Boyce index #' #' $Revision: 1.3 $ $Date: 2024/01/31 06:59:18 $ #' #' Copyright (c) 2024 Adrian Baddeley boyce <- function(X, Z, ..., breaks=NULL, halfwidth=NULL) { stopifnot(is.ppp(X)) lbar <- intensity(unmark(X)) if(is.im(Z) && Z$type == "factor") { ## convert to tessellation Z <- tess(image=Z) } if(is.tess(Z)) { ## discrete Boyce index Y <- as.tess(quadratcount(X, tess=Z)) lam <- marks(Y)[,1]/tile.areas(Y) result <- Z marks(result) <- lam/lbar } else { ## continuous Boyce index ngiven <- (!is.null(breaks)) + (!is.null(halfwidth)) if(ngiven == 0) stop("Either 'breaks' or 'halfwidth' should be given") if(ngiven == 2) stop("Arguments 'breaks' and 'halfwidth' are incompatible") if(!is.null(breaks)) { ## discrete Boyce index based on bands of Z values result0 <- rhohat(X, Z, smoother="piecewise", breaks=breaks, ...) } else { ## continuous Boyce index, equivalent to rhohat using box kernel result0 <- rhohat(X, Z, kernel="rect", bw=halfwidth/kernel.factor("rect"), ...) 
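      ## kernel.factor("rect") converts the requested half-width of the box
      ## kernel into the standard-deviation bandwidth 'bw' expected by rhohat,
      ## so the smoothing window has the requested half-width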
} result <- eval.fv(result0/lbar) newylab <- substitute(B(x), list(x=as.name(fvnames(result0, ".x")))) oldlabels <- attr(result0, "labl") newlabels <- oldlabels[colnames(result0) %in% colnames(result)] result <- rebadge.fv(result, new.fname="B", new.ylab=newylab, new.labl=newlabels) } return(result) } spatstat.explore/R/Hest.R0000644000176200001440000001042214611073307015022 0ustar liggesusers# # Hest.R # # Contact distribution for a random set # # Hest <- local({ Hest <- function(X, r=NULL, breaks=NULL, ..., W, correction=c("km", "rs", "han"), conditional=TRUE) { if(missing(W)) W <- NULL HestEngine(X, r=r, breaks=breaks, ..., W=W, correction=correction, conditional=conditional) } HestEngine <- function(X, r=NULL, breaks=NULL, ..., W, correction=c("km", "rs", "han"), conditional=TRUE, checkspacing=TRUE, testme=FALSE) { rorbgiven <- !is.null(r) || !is.null(breaks) if(is.ppp(X) || is.psp(X)) { XX <- X W0 <- Window(X) } else if(is.owin(X)) { XX <- X W0 <- Frame(X) } else if(is.im(X)) { if(!is.logical(ZeroValue(X))) stop("When X is an image, its pixel values should be logical values") XX <- solutionset(X) W0 <- Window(X) } else stop("X should be an object of class ppp, psp, owin or im") ## if(given.W <- !missing(W) && !is.null(W)) { stopifnot(is.owin(W)) if(!is.subset.owin(W, W0)) stop("W is not a subset of the observation window of X") } else { W <- W0 } ## handle corrections if(is.null(correction)) correction <- c("rs", "km", "cs") correction <- pickoption("correction", correction, c(none="none", raw="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", best="km"), multi=TRUE) corxtable <- c("km", "rs", "han", "none") corx <- as.list(corxtable %in% correction) names(corx) <- corxtable ## compute distance map D <- distmap(XX, ...) pixeps <- with(D, min(xstep, ystep)) if(!given.W && !is.im(X)) { B <- attr(D, "bdry") } else { B <- distmap(W, invert=TRUE, ...) har <- harmonise(D=D, B=B) D <- har$D[W, drop=FALSE] B <- har$B[W, drop=FALSE] } ## histogram breakpoints dmax <- max(D) breaks <- handle.r.b.args(r, breaks, W, NULL, rmaxdefault=dmax) rval <- breaks$r if(testme || (rorbgiven && checkspacing)) check.finespacing(rval, rname="r", eps=pixeps/4, W, rmaxdefault=dmax, context="in Hest(X,r)", action="fatal") ## extract distances and censoring distances dist <- as.vector(as.matrix(D)) bdry <- as.vector(as.matrix(B)) ok <- !is.na(dist) & !is.na(bdry) dist <- dist[ok] bdry <- bdry[ok] ## delete zero distances if(is.owin(X) || is.im(X)) { pos <- (dist > 0) areafraction <- 1 - mean(pos) dist <- dist[pos] bdry <- bdry[pos] } ## censoring indicators d <- (dist <= bdry) ## observed distances o <- pmin.int(dist, bdry) ## calculate estimates Z <- censtimeCDFest(o, bdry, d, breaks, KM=corx$km, RS=corx$rs, HAN=corx$han, RAW=corx$none, han.denom=if(corx$han) eroded.areas(W, rval) else NULL, tt=dist) ## conditional on d > 0 ? 
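  ## For a window or image X, the contact distribution may be returned either
  ## conditionally on the reference point lying outside X (conditional=TRUE,
  ## via condition() below) or unconditionally, with an atom at distance zero
  ## of mass 'areafraction' restored by reconstitute()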
if(is.owin(X) || is.im(X)) { if(conditional) { if(corx$km) Z$km <- condition(Z$km) if(corx$rs) Z$rs <- condition(Z$rs) if(corx$han) Z$han <- condition(Z$han) if(corx$none) Z$raw <- condition(Z$raw) } else { if(corx$km) Z$km <- reconstitute(Z$km, areafraction) if(corx$rs) Z$rs <- reconstitute(Z$rs, areafraction) if(corx$han) Z$han <- reconstitute(Z$han, areafraction) if(corx$none) Z$raw <- reconstitute(Z$raw, areafraction) } } ## relabel Z <- rebadge.fv(Z, substitute(H(r), NULL), "H") unitname(Z) <- unitname(X) attr(Z, "conserve") <- list(checkspacing=FALSE) return(Z) } condition <- function(x) { (x - x[1])/(1-x[1]) } reconstitute <- function(x, p) { p + (1-p) * x } Hest }) spatstat.explore/R/densityAdaptiveKernel.ppp.R0000644000176200001440000001072114627503021021213 0ustar liggesusers#' #' densityAdaptiveKernel.ppp.R #' #' $Revision: 1.16 $ $Date: 2024/06/04 03:09:11 $ #' #' #' Adaptive kernel smoothing via 3D FFT #' densityAdaptiveKernel.ppp <- function(X, bw, ..., weights=NULL, at=c("pixels", "points"), edge=TRUE, ngroups) { stopifnot(is.ppp(X)) at <- match.arg(at) nX <- npoints(X) if(nX == 0) switch(at, points = return(numeric(nX)), pixels = return(as.im(0, W=Window(X), ...))) if(missing(ngroups) || is.null(ngroups)) { ## default rule ngroups <- max(1L, floor(sqrt(nX))) } else if(any(is.infinite(ngroups))) { ngroups <- nX } else { check.1.integer(ngroups) ngroups <- min(nX, ngroups) } if(weighted <- !is.null(weights)) { check.nvector(weights, nX, oneok=TRUE, vname="weights") if(length(weights) == 1) weights <- rep(weights, nX) } else weights <- rep(1,nX) ## determine bandwidth for each data point if(missing(bw) || is.null(bw)) { bw <- do.call.matched(bw.abram, resolve.defaults(list(X=quote(X), at="points"), list(...)), extrargs=names(args(as.mask))) } else if(is.numeric(bw)) { check.nvector(bw, nX, oneok=TRUE, vname="bw") if(length(bw) == 1) bw <- rep(bw, nX) } else if(is.im(bw)) { bw <- safelookup(bw, X, warn=FALSE) if(anyNA(bw)) stop("Some data points lie outside the domain of image 'bw'", call.=FALSE) } else if(inherits(bw, "funxy")) { bw <- bw(X) if(anyNA(bw)) stop("Some data points lie outside the domain of function 'bw'", call.=FALSE) } else stop("Argument 'bw' should be a numeric vector or a pixel image") #' divide bandwidths into groups if(ngroups == nX) { ## every data point is a separate group groupid <- 1:nX qmid <- bw } else { ## usual case p <- seq(0,1,length=ngroups+1) qbands <- quantile(bw, p) groupid <- findInterval(bw,qbands,all.inside=TRUE) #' map to middle of group pmid <- (p[-1] + p[-length(p)])/2 qmid <- quantile(bw, pmid) } marks(X) <- if(weighted) weights else NULL group <- factor(groupid, levels=1:ngroups) Y <- split(X, group) Z <- mapply(density.ppp, x=Y, sigma=as.list(qmid), weights=lapply(Y, marks), MoreArgs=list(edge=edge, at=at, ...), SIMPLIFY=FALSE) ZZ <- switch(at, pixels = im.apply(Z, "sum"), points = unsplit(Z, group)) return(ZZ) } densityAdaptiveKernel.ppplist <- densityAdaptiveKernel.splitppp <- function(X, bw=NULL, ..., weights=NULL) { n <- length(X) bw <- ensure.listarg(bw, n=n, singletypes=c("NULL", "im", "funxy"), xtitle="bw") weights <- ensure.listarg(weights, n=n, singletypes=c("NULL", "im", "funxy", "expression"), xtitle="weights") y <- mapply(densityAdaptiveKernel.ppp, X=X, bw=bw, weights=weights, MoreArgs=list(...), SIMPLIFY=FALSE) return(as.solist(y, demote=TRUE)) } ## move this to spatstat.data when stable ensure.listarg <- function(x, n, singletypes=character(0), xtitle=NULL, things="point patterns") { if(inherits(x, singletypes)) { ## 
single object: replicate it x <- rep(list(x), n) return(x) } if(!is.list(x)) { ## error if(is.null(xtitle)) xtitle <- short.deparse(substitute(x)) whinge <- paste(xtitle, "should be a list") if(length(singletypes)) { otypes <- setdiff(singletypes, "NULL") if(length(otypes)) whinge <- paste(whinge, "or an object of class", commasep(dQuote(otypes), "or")) if("NULL" %in% singletypes) whinge <- paste(whinge, "or NULL") } stop(whinge, call.=FALSE) } nx <- length(x) if(nx != n) { if(is.null(xtitle)) xtitle <- short.deparse(substitute(x)) whinge <- paste("The length of", sQuote(xtitle), "should equal the number of", things, paren(paste(nx, "!=", n))) stop(whinge, call.=FALSE) } return(x) } spatstat.explore/R/derivfv.R0000644000176200001440000001040014611073310015552 0ustar liggesusers# # derivfv.R # # differentiation for fv objects # # $Revision: 1.7 $ $Date: 2018/09/28 05:12:08 $ # deriv.fv <- local({ derivative <- function(y, r, ...) { ss <- smooth.spline(r, y, ...) predict(ss, r, deriv=1)$y } deriv.fv <- function(expr, which="*", ..., method=c("spline", "numeric"), kinks=NULL, periodic=FALSE, Dperiodic=periodic) { f <- expr method <- match.arg(method) ## select columns ## if(length(which) == 1L && which %in% .Spatstat.FvAbbrev) { if(length(which) == 1L) { if(which == ".x") stop("Cannot smooth the function argument") which <- fvnames(f, which) } if(any(nbg <- !(which %in% names(f)))) stop(paste("Unrecognised column", ngettext(sum(nbg), "name", "names"), commasep(sQuote(which[nbg])), "in argument", sQuote("which"))) relevant <- names(f) %in% which ## get rname <- fvnames(f, ".x") df <- as.data.frame(f) rpos <- which(colnames(df) == rname) rvals <- df[,rpos] yvals <- df[,relevant,drop=FALSE] nr <- length(rvals) ## if(Dperiodic) { ## Derivative should be periodic ## Recycle data to imitate periodicity DR <- diff(range(rvals)) rvals <- c(rvals[-nr] - DR, rvals, rvals[-1L] + DR) yleft <- yvals[-nr, , drop=FALSE] yright <- yvals[-1L, , drop=FALSE] if(!periodic) { ## original data are not periodic (e.g. cdf of angular variable) ## but derivative must be periodic jump <- matrix(as.numeric(yvals[nr,] - yvals[1L, ]), nr-1L, ncol(yvals), byrow=TRUE) yleft <- yleft - jump yright <- yright + jump } yvals <- rbind(yleft, yvals, yright) actual <- nr:(2*nr - 1L) NR <- length(rvals) } else { NR <- nr actual <- 1:nr } ## cut x axis into intervals? if(is.null(kinks)) { cutx <- factor(rep(1, NR)) } else { rr <- range(rvals) if(periodic) kinks <- c(kinks-DR, kinks, kinks+DR) breaks <- sortunique(kinks) if(breaks[1L] > rr[1L]) breaks <- c(rr[1L], breaks) if(max(breaks) < rr[2L]) breaks <- c(breaks, rr[2L]) cutx <- cut(rvals, breaks=breaks, include.lowest=TRUE) } ## process for(segment in levels(cutx)) { ii <- (cutx == segment) yy <- yvals[ii, , drop=FALSE] switch(method, numeric = { dydx <- apply(yy, 2, diff)/diff(rvals[ii]) nd <- nrow(dydx) dydx <- rbind(dydx, dydx[nd, ]) }, spline = { dydx <- apply(yy, 2, derivative, r=rvals[ii], ...) 
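                 ## each selected column on this segment is fitted with a
                 ## smoothing spline and its first derivative evaluated
                 ## (see the 'derivative' helper above)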
}) df[ii[actual], relevant] <- dydx[ actual, ] } ## pack up result <- f result[,] <- df ## tweak name of function if(!is.null(yl <- attr(f, "ylab"))) attr(result, "ylab") <- substitute(bold(D)~Fx, list(Fx=yl)) if(!is.null(ye <- attr(f, "yexp"))) attr(result, "yexp") <- substitute(bold(D)~Fx, list(Fx=ye)) ## tweak mathematical labels attr(result, "labl")[relevant] <- paste0("bold(D)~", attr(f, "labl")[relevant]) return(result) } deriv.fv }) increment.fv <- function(f, delta) { stopifnot(is.fv(f)) check.1.real(delta) stopifnot(delta > 0) half <- delta/2 xx <- with(f, .x) ynames <- fvnames(f, ".") yy <- as.data.frame(lapply(ynames, function(a, xx, f, h) { g <- as.function(f, value=a) g(xx+h)-g(xx-h) }, xx=xx, f=f, h=half)) Y <- f Y[,ynames] <- yy ## tweak name of function if(!is.null(yl <- attr(f, "ylab"))) attr(Y, "ylab") <- substitute(Delta~Fx, list(Fx=yl)) if(!is.null(ye <- attr(f, "yexp"))) attr(Y, "yexp") <- substitute(Delta~Fx, list(Fx=ye)) ## tweak mathematical labels relevant <- colnames(Y) %in% ynames attr(Y, "labl")[relevant] <- paste0("Delta~", attr(f, "labl")[relevant]) ## tweak recommended range attr(Y, "alim") <- intersect.ranges(attr(f, "alim"), range(xx) + c(1,-1)*half) return(Y) } spatstat.explore/R/spatcov.R0000644000176200001440000000450214611073311015573 0ustar liggesusers#' estimate covariance function of a random field #' assuming stationary (and optionally - isotropic) #' #' Naive moment estimator #' #' Originally written for Max Chatfield #' original: Adrian Baddeley 15-19 may 2020 #' $Revision: 1.11 $ $Date: 2021/05/03 02:40:27 $ spatcov <- function(X, Y=X, ..., correlation=FALSE, isotropic=TRUE, clip=TRUE, pooling=TRUE) { stopifnot(is.im(X)) eX <- X - mean(X) if(correlation) eX <- eX/sqrt(mean(eX^2)) if(missing(Y) || is.null(Y)) { #' spatial covariance of X A <- imcov(eX) } else { #' spatial cross-covariance of X and Y stopifnot(is.im(Y)) eY <- Y - mean(Y) if(correlation) eY <- eY/sqrt(mean(eY^2)) A <- imcov(eX, eY) } B <- setcov(Window(X)) if(!(isotropic && pooling)) { #' first estimate covariance as function of vector argument Z <- A/B #' deal with numerical errors at extremes pixelarea <- with(X, xstep * ystep) Z[B < pixelarea] <- 0 } if(isotropic) { #' result is a function of lag distance if(pooling) { mA <- rotmean(A) mB <- rotmean(B) f <- eval.fv(mA/mB) } else { f <- rotmean(Z) } #' give it more meaningful labels f <- rebadge.fv(f, new.ylab=quote(C(r)), new.fname="C", tags=fvnames(f, ".y"), new.tags="est", new.desc="estimate of %s", new.labl="hat(%s)(r)") if(clip) attr(f, "alim") <- c(0, shortside(Frame(X))/2) result <- f } else { #' result is an image representing a function of lag vector Z <- A/B #' return an image representing a function of lag vector if(clip) { Box <- Frame(Z) b <- sidelengths(Box) Bclip <- trim.rectangle(Box, b[1]/4, b[2]/4) Z <- Z[Bclip, drop=FALSE, tight=TRUE] } result <- Z } return(result) } pairMean <- function(fun, W, V=NULL, ..., normalise=TRUE) { #' fun is a function of pairwise distance if(!is.function(fun)) stop("fun should be a function in the R language") #' W is the domain over which to integrate W <- as.owin(W) FD <- distcdf(W, V, ..., savedenom=!normalise) result <- as.numeric(stieltjes(fun, FD, ...)) if(!normalise) result <- result * attr(FD, "denom") return(result) } spatstat.explore/R/densityHeat.ppp.R0000644000176200001440000002656314700374620017214 0ustar liggesusers#' #' densityHeat.ppp.R #' #' Diffusion estimator of density/intensity #' #' Copyright (C) 2018-2024 Adrian Baddeley, Tilman Davies and Suman Rakshit #' #' 
Licence: GNU Public Licence >= 2 #' densityHeat <- function(x, sigma, ...) { UseMethod("densityHeat") } densityHeat.ppp <- function(x, sigma, ..., weights=NULL, connect=8, symmetric=FALSE, sigmaX=NULL, k=1, show=FALSE, se=FALSE, at=c("pixels", "points"), leaveoneout = TRUE, extrapolate = FALSE, coarsen = TRUE, verbose=TRUE, internal=NULL) { stopifnot(is.ppp(x)) nX <- npoints(x) at <- match.arg(at) if(length(weights)) check.nvector(weights, nX) else weights <- NULL if(extrapolate) { ## Richardson extrapolation ## first compute intensity estimate on the desired grid cl <- sys.call() cl$extrapolate <- FALSE L <- eval(cl, sys.parent()) dimL <- dim(L) ## remove all function arguments that control pixel resolution cl$dimyx <- cl$eps <- cl$xy <- NULL if(coarsen) { ## compute on the desired grid and on a coarser grid Lfine <- L dimfine <- dimL ## compute on coarser grid dimcoarse <- round(dimfine/2) cl$dimyx <- dimcoarse Lcoarse <- eval(cl, sys.parent()) ## interpolate coarse to fine Lcoarse <- as.im(interp.im, W=Window(Lfine), Z=Lcoarse, xy=Lfine, bilinear=TRUE) } else { ## compute on the desired grid and a finer grid Lcoarse <- L dimcoarse <- dimL ## compute on finer grid dimfine <- round(dimcoarse * 2) cl$dimyx <- dimfine Lfine <- eval(cl, sys.parent()) ## sample from fine to coarse Lfine <- as.im(Lfine, xy=Lcoarse) } ## Richardson extrapolation, ratio = 2, exponent = 1 Lextrap <- 2 * Lfine - Lcoarse if(se) attr(Lextrap, "se") <- attr(L, "se") return(Lextrap) } delayed <- !is.null(sigmaX) setuponly <- identical(internal$setuponly, TRUE) want.Xpos <- delayed || setuponly if(!setuponly && (se || (at == "points" && leaveoneout))) { #' NEED INDIVIDUAL HEAT KERNELS FOR EACH DATA POINT #' to calculate estimate and standard error, #' or leave-one-out estimate if(!is.null(sigmaX)) stop("variance calculation is not implemented for lagged arrivals") lambda <- varlam <- switch(at, pixels = as.im(0, W=Window(x), ...), points = numeric(nX)) if(verbose) { pstate <- list() cat(paste("Processing", nX, "heat kernels... 
")) } if(is.null(weights)) { ## unweighted calculation: coded separately for efficiency for(i in seq_len(nX)) { Heat.i <- densityHeat.ppp(x[i], sigma, ..., connect=connect, symmetric=symmetric, k=k) switch(at, pixels = { lambda <- lambda + Heat.i varlam <- varlam + Heat.i^2 }, points = { if(leaveoneout) { Heat.ixi <- safelookup(Heat.i,x[-i],warn=FALSE) #'was: Heat.ixi <- Heat.i[ x[-i] ] lambda[-i] <- lambda[-i] + Heat.ixi varlam[-i] <- varlam[-i] + Heat.ixi^2 } else { lambda <- lambda + Heat.i[x] varlam <- varlam + Heat.i[x]^2 } }) if(verbose) pstate <- progressreport(i, nX, state=pstate) } } else { ## weighted calculation for(i in seq_len(nX)) { Heat.i <- densityHeat.ppp(x[i], sigma, ..., connect=connect, symmetric=symmetric, k=k) w.i <- weights[i] switch(at, pixels = { lambda <- lambda + w.i * Heat.i varlam <- varlam + w.i * Heat.i^2 }, points = { if(leaveoneout) { Heat.ixi <- Heat.i[ x[-i] ] lambda[-i] <- lambda[-i] + w.i * Heat.ixi varlam[-i] <- varlam[-i] + w.i * Heat.ixi^2 } else { lambda <- lambda + w.i * Heat.i[x] varlam <- varlam + w.i * Heat.i[x]^2 } }) if(verbose) pstate <- progressreport(i, nX, state=pstate) } } if(verbose) splat("Done.") result <- lambda attr(result, "se") <- sqrt(varlam) return(result) } check.1.integer(k) stopifnot(k >= 1) if(!(connect %in% c(4,8))) stop("connectivity must be 4 or 8") ## initial state for diffusion if(delayed) { #' smoothing bandwidths attributed to each data point check.nvector(sigmaX, nX) stopifnot(all(is.finite(sigmaX))) stopifnot(all(sigmaX >= 0)) if(missing(sigma)) sigma <- max(sigmaX) else check.1.real(sigma) #' sort in decreasing order of bandwidth osx <- order(sigmaX, decreasing=TRUE) sigmaX <- sigmaX[osx] x <- x[osx] #' discretise window W <- do.call.matched(as.mask, resolve.defaults(list(...), list(w=Window(x)))) #' initial state is zero Y <- as.im(W, value=0) #' discretised coordinates Xpos <- nearest.valid.pixel(x$x, x$y, Y) } else { #' pixellate pattern Y <- pixellate(x, ..., weights=weights, preserve=TRUE, savemap=want.Xpos) Xpos <- attr(Y, "map") } #' validate sigma if(is.im(sigma)) { # ensure Y and sigma are on the same grid A <- harmonise(Y=Y, sigma=sigma) Y <- A$Y sigma <- A$sigma } else if(is.function(sigma)) { sigma <- as.im(sigma, as.owin(Y)) } else { sigma <- as.numeric(sigma) check.1.real(sigma) } #' normalise as density pixelarea <- with(Y, xstep * ystep) Y <- Y / pixelarea v <- as.matrix(Y) #' initial state u <- as.vector(v) if(want.Xpos) { #' map (row, col) to serial number serial <- matrix(seq_len(length(v)), nrow(v), ncol(v)) Xpos <- serial[as.matrix(as.data.frame(Xpos))] } #' symmetric random walk? if(symmetric) { asprat <- with(Y, ystep/xstep) if(abs(asprat-1) > 0.01) warning(paste("Symmetric random walk on a non-square grid", paren(paste("aspect ratio", asprat))), call.=FALSE) } #' determine appropriate jump probabilities & time step pmax <- 1/(connect+1) # maximum permitted jump probability xstep <- Y$xstep ystep <- Y$ystep minstep <- min(xstep, ystep) if(symmetric) { #' all permissible transitions have the same probability 'pjump'. #' Determine Nstep, and dt=sigma^2/Nstep, such that #' Nstep >= 16 and M * pjump * minstep^2 = dt M <- if(connect == 4) 2 else 6 Nstep <- max(16, ceiling(max(sigma)^2/(M * pmax * minstep^2))) dt <- sn <- (sigma^2)/Nstep px <- py <- pxy <- sn/(M * minstep^2) } else { #' px is the probability of jumping 1 step to the right #' py is the probability of jumping 1 step up #' if connect=4, horizontal and vertical jumps are exclusive. 
#' if connect=8, horizontal and vertical increments are independent #' Determine Nstep, and dt = sigma^2/Nstep, such that #' Nstep >= 16 and 2 * pmax * minstep^2 = dt Nstep <- max(16, ceiling(max(sigma)^2/(2 * pmax * minstep^2))) dt <- sn <- (sigma^2)/Nstep px <- sn/(2 * xstep^2) py <- sn/(2 * ystep^2) if(max(px) > pmax) stop("Internal error: px exceeds pmax") if(max(py) > pmax) stop("Internal error: py exceeds pmax") if(connect == 8) pxy <- px * py } #' arrival times if(!is.null(sigmaX)) iarrive <- pmax(1, pmin(Nstep, Nstep - round((sigmaX^2)/sn))) #' construct adjacency matrices dimv <- dim(v) my <- gridadjacencymatrix(dimv, across=FALSE, down=TRUE, diagonal=FALSE) mx <- gridadjacencymatrix(dimv, across=TRUE, down=FALSE, diagonal=FALSE) if(connect == 8) mxy <- gridadjacencymatrix(dimv, across=FALSE, down=FALSE, diagonal=TRUE) #' restrict to window if(anyNA(u)) { ok <- !is.na(u) u <- u[ok] if(want.Xpos) { #' adjust serial numbers Xpos <- cumsum(ok)[Xpos] backmap <- which(ok) } mx <- mx[ok,ok,drop=FALSE] my <- my[ok,ok,drop=FALSE] if(connect == 8) mxy <- mxy[ok,ok,drop=FALSE] if(is.im(sigma)) { px <- px[ok] py <- py[ok] if(connect == 8) pxy <- pxy[ok] } } else { ok <- TRUE backmap <- NULL if(is.im(sigma)) { px <- px[] py <- py[] if(connect == 8) pxy <- pxy[] } } #' construct iteration matrix if(connect == 4) { A <- px * mx + py * my } else { A <- px * (1 - 2 * py) * mx + py * (1 - 2 * px) * my + pxy * mxy } #' construct one-step transition probability matrix if(any(A < 0)) stop("Internal error: negative jump probabilities", call.=FALSE) totaljump <- rowSums(A) if(max(totaljump) > 1) stop("Internal error: jump probability exceeds 1", call.=FALSE) diag(A) <- 1 - totaljump #' debug #' k-step transition probabilities if(k > 1) { Ak <- A for(j in 2:k) Ak <- Ak %*% A } else Ak <- A k <- as.integer(k) Nstep <- as.integer(Nstep) Nblock <- Nstep/k Nrump <- Nstep - Nblock * k #' secret exit - return setup data only if(setuponly) return(list(Y=Y, u=u, Xpos=Xpos, backmap=backmap, sigma=sigma, A=A, Ak=Ak, k=k, Nstep=Nstep, Nblock=Nblock, Nrump=Nrump, dx=xstep, dy=ystep, dt=dt)) #' run U <- u Z <- Y if(!delayed) { if(!show) { for(iblock in 1:Nblock) U <- U %*% Ak } else { opa <- par(ask=FALSE) on.exit(par(opa)) each <- max(1, round(Nblock/60)) for(iblock in 1:Nblock) { U <- U %*% Ak if(iblock %% each == 0) { Z[] <- as.vector(U) f <- sqrt((iblock * k)/Nstep) main <- if(is.im(sigma)) paste(signif(f, 3), "* sigma") else paste("sigma =", signif(f * sigma, 3)) plot(Z, main=main) Sys.sleep(0.4) } } par(opa) } if(Nrump > 0) for(istep in 1:Nrump) U <- U %*% A } else { #' lagged arrivals used <- rep(FALSE, nX) contrib <- (weights %orifnull% rep(1,nX))/pixelarea if(!show) { for(iblock in 1:Nblock) { U <- U %*% Ak if(any(ready <- (!used & (iarrive <= iblock * k)))) { #' add points for(i in which(ready)) { j <- Xpos[i] U[j] <- U[j] + contrib[i] used[i] <- TRUE } } } } else { opa <- par(ask=FALSE) on.exit(par(opa)) each <- max(1, round(Nblock/60)) for(iblock in 1:Nblock) { U <- U %*% Ak if(any(ready <- (!used & (iarrive <= iblock * k)))) { #' add points for(i in which(ready)) { j <- Xpos[i] U[j] <- U[j] + contrib[i] used[i] <- TRUE } } if(iblock %% each == 0) { Z[] <- as.vector(U) f <- sqrt((iblock * k)/Nstep) main <- if(is.im(sigma)) paste(signif(f, 3), "* sigma") else paste("sigma =", signif(f * sigma, 3)) plot(Z, main=main) Sys.sleep(0.4) } } par(opa) } if(Nrump > 0) for(istep in 1:Nrump) U <- U %*% A } #' pack up Z[] <- as.vector(U) if(at == "points") Z <- Z[x] return(Z) } 
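#'
#' Illustrative usage sketch (not part of the package code). It assumes the
#' 'cells' point pattern from spatstat.data is available and uses only the
#' arguments defined above; kept as comments so nothing is executed when the
#' package source is collated.
#'
#'   D  <- densityHeat(cells, sigma=0.1)                 # pixel image estimate
#'   Dp <- densityHeat(cells, sigma=0.1, at="points",
#'                     leaveoneout=TRUE)                 # values at data points
#'   E  <- densityHeat(cells, sigma=0.1, se=TRUE)
#'   SE <- attr(E, "se")                                 # pointwise standard error
#'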
spatstat.explore/R/distcdf.R0000644000176200001440000001364314611073310015541 0ustar liggesusers#' #' distcdf.R #' #' cdf of |X1-X2| when X1,X2 are iid uniform in W, etc #' #' $Revision: 1.19 $ $Date: 2022/02/12 09:07:38 $ #' distcdf <- local({ distcdf <- function(W, V=W, ..., dW=1, dV=dW, nr=1024, regularise=TRUE, savedenom=FALSE, delta=NULL) { reflexive <- (missing(V) || is.null(V)) && (missing(dV) || is.null(dV)) diffuse <- is.owin(W) && is.owin(V) uniformW <- is.null(dW) || identical(dW, 1) uniformV <- is.null(dV) || identical(dV, 1) uniform <- uniformW && uniformV if(is.owin(W)) { W <- as.mask(as.owin(W), ...) dW <- as.im(dW, W=W) } else if(is.ppp(W)) { if(uniformW) { #' discrete uniform distribution on W dW <- pixellate(W, ...) } else { #' dW should be a weight or vector of weights if(!is.vector(dW) || !is.numeric(dW)) stop("If W is a point pattern, dW should be a vector of weights") if(length(dW) == 1L) { dW <- rep(dW, npoints(W)) } else stopifnot(length(dW) == npoints(W)) dW <- pixellate(W, weights=dW, ...) } } else stop("W should be a point pattern or a window") if(!reflexive) { if(is.owin(V)) { V <- as.mask(as.owin(V), ...) dV <- as.im(dV, W=V) } else if(is.ppp(V)) { if(uniformV) { #' discrete uniform distribution on V dV <- pixellate(V, ...) } else { #' dV should be a weight or vector of weights if(!is.vector(dV) || !is.numeric(dV)) stop("If V is a point pattern, dV should be a vector of weights") if(length(dV) == 1L) { dV <- rep(dV, npoints(V)) } else stopifnot(length(dV) == npoints(V)) dV <- pixellate(V, weights=dV, ...) } } else stop("V should be a point pattern or a window") if(!uniformV && min(dV) < 0) stop("Negative values encountered in dV") } #' compute if(diffuse && uniform) { #' uniform distributions on windows g <- if(reflexive) setcov(W, ...) else setcov(W, V, ...) 
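      #' the set covariance gives, for each lag vector, the total weight of
      #' pairs of locations separated by that vector; it is reduced to a CDF
      #' of distance by the weighted histogram of |lag| computed below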
} else { g <- if(reflexive) imcov(dW) else imcov(dW, dV) } r <- as.im(function(x,y) { sqrt(x^2 + y^2) }, g) pix <- with(r, max(xstep, ystep)) #' extract rvals <- as.vector(as.matrix(r)) gvals <- as.vector(as.matrix(g)) rmax <- max(rvals) #' histogram if(is.null(nr)) nr <- max(1024, 512 * ceiling(rmax/(pix*512))) rgrid <- seq(0, rmax, length=nr) ## dr <- rmax/(nr-1) h <- whist(rvals, breaks=rgrid, weights=gvals/sum(gvals)) ch <- c(0,cumsum(h)) #' regularise at very short distances if(regularise) { pix <- with(r, max(xstep, ystep)) suspect <- which(rgrid <= 10 * pix) reference <- which(rgrid <= 20 * pix) weigh <- pmin(seq_along(ch), min(reference))^2 fit <- lm(ch ~ I(rgrid^2) + I(rgrid^3) - 1, subset=reference, weights=weigh) ch[suspect] <- predict(fit)[suspect] ## enforce cdf properties ch[1] <- 0 ch <- cummax(ch) } #' ok result <- fv(data.frame(r=rgrid, f=ch), "r", quote(CDF(r)), "f", , range(rvals), c("r","%s(r)"), c("Interpoint distance","Cumulative probability"), fname="CDF") #' refine spacing, if required if(!is.null(delta)) result <- refine(result, delta) #' if(savedenom) { denomW <- integral(dW) denomV <- if(reflexive) denomW else integral(dV) attr(result, "denom") <- denomW * denomV } return(result) } refine <- function(H, delta=NULL, verbose=FALSE, force=FALSE) { ## H is CDF of pairwise distances ## Ensure H has spacing at most 'delta' check.1.real(delta) stopifnot(is.finite(delta) && (delta > 0)) rstep <- mean(diff(H$r)) inflate <- rstep/delta if(verbose) splat("delta=", delta, "rstep=", rstep, "inflate=", inflate) if(inflate > 1) { ## interpolate H if(verbose) { plot(H, xlim=c(0, R/2)) } H <- interpCDF(H, n=ceiling(inflate)) if(verbose) { plot(H, add=TRUE, xlim=c(0,R), col=2) splat("New rstep=", mean(diff(H$r))) } if(force) { ## force CDF to be nondecreasing and to start from 0 Hr <- H[["f"]] Hr[1] <- 0 Hr <- cummax(Hr) H[["f"]] <- Hr } } return(H) } interpCDF <- function(H, ..., method=c("smooth.spline", "loess"), delta=NULL, n=NULL) { ## H is CDF of pairwise distance ## Interpolate H by smoothing H(r)/r^2 method <- match.arg(method) rname <- fvnames(H, ".x") rold <- H[[rname]] rpos <- (rold > 0) if(is.null(delta) == is.null(n)) stop("Exactly one of the arguments 'delta' or 'n' should be given") if(!is.null(n)) { delta <- mean(diff(rold))/n } else { check.1.real(delta) stopifnot(delta > 0) } rnew <- seq(min(rold), max(rold), by=delta) ## initialise result newvalues <- vector(mode="list", length=ncol(H)) names(newvalues) <- colnames(H) newvalues[[rname]] <- rnew ## process each column of function values nama <- fvnames(H, ".a") for(ynam in nama) { yy <- H[[ynam]] ok <- is.finite(yy) & rpos yok <- yy[ok] rok <- rold[ok] switch(method, smooth.spline = { ss <- smooth.spline(x=rok, y=yok/rok^2, ...) yhat <- predict(ss, rnew)$y * rnew^2 }, loess = { df <- data.frame(x=rok, y=yok/rok^2) lo <- loess(y ~ x, df, ...) 
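## evaluate the loess fit to f(r)/r^2 at the new r values,
## then rescale by rnew^2 to recover f(r)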
yhat <- predict(lo, data.frame(x=rnew)) * rnew^2 }) newvalues[[ynam]] <- yhat } newH <- as.data.frame(newvalues) ## copy attributes anames <- setdiff(names(attributes(H)), c("row.names", "dim", "dimnames", "names", "tsp")) for(e in anames) attr(newH, e) <- attr(H, e) return(newH) } distcdf }) spatstat.explore/R/marktable.R0000644000176200001440000000307414611073310016060 0ustar liggesusers# # marktable.R # # Tabulate mark frequencies in neighbourhood of each point # for multitype point patterns # # $Revision: 1.8 $ $Date: 2022/06/09 01:07:54 $ # # Requested by Ian Robertson "marktable" <- function(X, R, N, exclude=TRUE, collapse=FALSE) { if(!inherits(X, c("ppp", "lpp", "pp3", "ppx"))) stop("X should be a point pattern (of class 'ppp', 'lpp', 'pp3' or 'ppx')") if(!is.marked(X, dfok=FALSE)) stop("point pattern has no marks") m <- marks(X) if(!is.factor(m)) stop("marks must be a factor") gotR <- !missing(R) && !is.null(R) gotN <- !missing(N) && !is.null(N) if(gotN == gotR) stop("Exactly one of the arguments N and R should be given") stopifnot(is.logical(exclude) && length(exclude) == 1) if(gotR) { stopifnot(is.numeric(R) && length(R) == 1 && R > 0) #' identify close pairs p <- closepairs(X,R,what="indices") pi <- p$i pj <- p$j if(!exclude) { #' add identical pairs n <- X$n pi <- c(pi, 1:n) pj <- c(pj, 1:n) } } else { stopifnot(is.numeric(N) && length(N) == 1) ii <- seq_len(npoints(X)) nn <- nnwhich(X, k=1:N) if(N == 1) nn <- matrix(nn, ncol=1) if(!exclude) nn <- cbind(ii, nn) pi <- as.vector(row(nn)) pj <- as.vector(nn) } #' tabulate if(!collapse) { ## table for each point i <- factor(pi, levels=seq_len(npoints(X))) mj <- m[pj] mat <- table(point=i, mark=mj) } else { #' table by type mi <- m[pi] mj <- m[pj] mat <- table(point=mi, neighbour=mj) } return(mat) } spatstat.explore/R/twostage.R0000644000176200001440000003000114611073311015742 0ustar liggesusers## ## twostage.R ## ## Two-stage Monte Carlo tests and envelopes ## ## $Revision: 1.19 $ $Date: 2022/05/23 02:33:06 $ ## bits.test <- function(X, ..., exponent=2, nsim=19, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate=FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { twostage.test(X, ..., exponent=exponent, nsim=nsim, nsimsub=nsim, reuse=FALSE, alternative=match.arg(alternative), leaveout=leaveout, interpolate=interpolate, savefuns=savefuns, savepatterns=savepatterns, verbose=verbose, testblurb="Balanced Independent Two-stage Test") } dg.test <- function(X, ..., exponent=2, nsim=19, nsimsub=nsim-1, alternative=c("two.sided", "less", "greater"), reuse=TRUE, leaveout=1, interpolate=FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { check.1.integer(nsim) stopifnot(nsim >= 2) if(!missing(nsimsub) && (nsimsub < 1 || !relatively.prime(nsim, nsimsub))) stop("nsim and nsimsub must be relatively prime") twostage.test(X, ..., exponent=exponent, nsim=nsim, nsimsub=nsimsub, reuse=reuse, alternative=match.arg(alternative), leaveout=leaveout, interpolate=interpolate, savefuns=savefuns, savepatterns=savepatterns, verbose=verbose, testblurb="Dao-Genton adjusted goodness-of-fit test") } twostage.test <- function(X, ..., exponent=2, nsim=19, nsimsub=nsim, alternative=c("two.sided", "less", "greater"), reuse=FALSE, leaveout=1, interpolate=FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE, badXfatal=TRUE, testblurb="Two-stage Monte Carlo test") { Xname <- short.deparse(substitute(X)) alternative <- match.arg(alternative) env.here <- sys.frame(sys.nframe()) Xismodel <- inherits(X, c("ppm", "kppm", "dppm", "lppm", 
"slrm")) check.1.integer(nsim) check.1.integer(nsimsub) stopifnot(nsim >= 2) stopifnot(nsimsub >= 2) ## first-stage p-value if(verbose) cat("Applying first-stage test to original data... ") tX <- envelopeTest(X, ..., nsim=if(reuse) nsim else nsimsub, alternative=alternative, leaveout=leaveout, interpolate=interpolate, exponent=exponent, savefuns=savefuns, savepatterns=savepatterns || reuse, verbose=FALSE, badXfatal=badXfatal, envir.simul=env.here) pX <- tX$p.value ## check special case afortiori <- !interpolate && !reuse && (nsimsub < nsim) && (pX == (1/(nsim+1)) || pX == 1) if(afortiori) { ## result is determined padj <- pX pY <- NULL } else { ## result is not yet determined if(!reuse) { if(verbose) cat("Repeating first-stage test... ") tXX <- envelopeTest(X, ..., nsim=nsim, alternative=alternative, leaveout=leaveout, interpolate=interpolate, exponent=exponent, savefuns=savefuns, savepatterns=TRUE, verbose=FALSE, badXfatal=badXfatal, envir.simul=env.here) ## extract simulated patterns Ylist <- attr(attr(tXX, "envelope"), "simpatterns") } else { Ylist <- attr(attr(tX, "envelope"), "simpatterns") } if(verbose) cat("Done.\n") ## apply same test to each simulated pattern if(verbose) cat(paste("Running second-stage tests on", nsim, "simulated patterns... ")) pY <- numeric(nsim) for(i in 1:nsim) { if(verbose) progressreport(i, nsim) Yi <- Ylist[[i]] ## if X is a model, fit it to Yi. Otherwise the implicit model is CSR. if(Xismodel) Yi <- update(X, Yi) tYi <- envelopeTest(Yi, ..., nsim=nsimsub, alternative=alternative, leaveout=leaveout, interpolate=interpolate, exponent=exponent, savepatterns=TRUE, verbose=FALSE, badXfatal=FALSE, envir.simul=env.here) pY[i] <- tYi$p.value } pY <- sort(pY) ## compute adjusted p-value padj <- (1 + sum(pY <= pX))/(1+nsim) } # pack up method <- tX$method method <- c(testblurb, paste("based on", method[1L]), paste("First stage:", method[2L]), method[-(1:2)], if(afortiori) { paren(paste("Second stage was omitted: p0 =", pX, "implies p-value =", padj)) } else if(reuse) { paste("Second stage: nested, ", nsimsub, "simulations for each first-stage simulation") } else { paste("Second stage:", nsim, "*", nsimsub, "nested simulations independent of first stage") } ) names(pX) <- "p0" result <- structure(list(statistic = pX, p.value = padj, method = method, data.name = Xname), class="htest") attr(result, "rinterval") <- attr(tX, "rinterval") attr(result, "pX") <- pX attr(result, "pY") <- pY if(savefuns || savepatterns) result <- hasenvelope(result, attr(tX, "envelope")) return(result) } dg.envelope <- function(X, ..., nsim=19, nsimsub=nsim-1, nrank=1, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { twostage.envelope(X, ..., nsim=nsim, nsimsub=nsimsub, reuse=TRUE, nrank=nrank, alternative=match.arg(alternative), leaveout=leaveout, interpolate=interpolate, savefuns=savefuns, savepatterns=savepatterns, verbose=verbose, testlabel="bits") } bits.envelope <- function(X, ..., nsim=19, nrank=1, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { twostage.envelope(X, ..., nsim=nsim, nsimsub=nsim, reuse=FALSE, nrank=nrank, alternative=match.arg(alternative), leaveout=leaveout, interpolate=interpolate, savefuns=savefuns, savepatterns=savepatterns, verbose=verbose, testlabel="bits") } twostage.envelope <- function(X, ..., nsim=19, nsimsub=nsim, nrank=1, alternative=c("two.sided", "less", "greater"), reuse=FALSE, leaveout=1, 
interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE, badXfatal=TRUE, testlabel="twostage") { # Xname <- short.deparse(substitute(X)) alternative <- match.arg(alternative) env.here <- sys.frame(sys.nframe()) Xismodel <- inherits(X, c("ppm", "kppm", "dppm", "lppm", "slrm")) check.1.integer(nsim) check.1.integer(nsimsub) stopifnot(nsim >= 2) stopifnot(nsimsub >= 1) ############## first stage ################################## if(verbose) cat("Applying first-stage test to original data... ") tX <- envelopeTest(X, ..., alternative=alternative, leaveout=leaveout, interpolate = interpolate, nsim=if(reuse) nsim else nsimsub, nrank=nrank, exponent=Inf, savepatterns=TRUE, savefuns=TRUE, verbose=FALSE, badXfatal=badXfatal, envir.simul=env.here) if(verbose) cat("Done.\n") envX <- attr(tX, "envelope") if(!reuse) { if(verbose) cat("Repeating first-stage test... ") tXX <- envelopeTest(X, ..., alternative=alternative, leaveout=leaveout, interpolate = interpolate, nsim=nsim, nrank=nrank, exponent=Inf, savepatterns=TRUE, savefuns=TRUE, verbose=FALSE, badXfatal=badXfatal, envir.simul=env.here) ## extract simulated patterns Ylist <- attr(attr(tXX, "envelope"), "simpatterns") } else { Ylist <- attr(attr(tX, "envelope"), "simpatterns") } if(verbose) cat("Done.\n") ############## second stage ################################# ## apply same test to each simulated pattern if(verbose) cat(paste("Running tests on", nsim, "simulated patterns... ")) pvalY <- numeric(nsim) for(i in 1:nsim) { if(verbose) progressreport(i, nsim) Yi <- Ylist[[i]] # if X is a model, fit it to Yi. Otherwise the implicit model is CSR. if(Xismodel) Yi <- update(X, Yi) tYi <- envelopeTest(Yi, ..., alternative=alternative, leaveout=leaveout, interpolate = interpolate, save.interpolant = FALSE, nsim=nsimsub, nrank=nrank, exponent=Inf, savepatterns=TRUE, verbose=FALSE, badXfatal=FALSE, envir.simul=env.here) pvalY[i] <- tYi$p.value } ## Find critical deviation if(!interpolate) { ## find critical rank 'l' rankY <- pvalY * (nsimsub + 1) twostage.rank <- orderstats(rankY, k=nrank) if(verbose) cat(paste0(testlabel, ".rank"), "=", twostage.rank, fill=TRUE) ## extract deviation values from top-level simulation simdev <- attr(tX, "statistics")[["sim"]] ## find critical deviation twostage.crit <- orderstats(simdev, decreasing=TRUE, k=twostage.rank) if(verbose) cat(paste0(testlabel, ".crit"), "=", twostage.crit, fill=TRUE) } else { ## compute estimated cdf of t fhat <- attr(tX, "density")[c("x", "y")] fhat$z <- with(fhat, cumsum(y)/sum(y)) # 'within' upsets package checker ## find critical (second stage) p-value pcrit <- orderstats(pvalY, k=nrank) ## compute corresponding upper quantile of estimated density of t twostage.crit <- with(fhat, { min(x[z >= 1 - pcrit]) }) } ## make fv object, for now refname <- if("theo" %in% names(envX)) "theo" else "mmean" fname <- attr(envX, "fname") result <- (as.fv(envX))[, c(fvnames(envX, ".x"), fvnames(envX, ".y"), refname)] refval <- envX[[refname]] ## newdata <- data.frame(hi=refval + twostage.crit, lo=refval - twostage.crit) newlabl <- c(makefvlabel(NULL, NULL, fname, "hi"), makefvlabel(NULL, NULL, fname, "lo")) alpha <- nrank/(nsim+1) alphatext <- paste0(100*alpha, "%%") newdesc <- c(paste("upper", alphatext, "critical boundary for %s"), paste("lower", alphatext, "critical boundary for %s")) switch(alternative, two.sided = { }, less = { newdata$hi <- Inf newlabl[1L] <- "infinity" newdesc[1L] <- "infinite upper limit" }, greater = { newdata$lo <- -Inf newlabl[2L] <- "infinity" newdesc[2L] <- 
"infinite lower limit" }) result <- bind.fv(result, newdata, newlabl, newdesc) fvnames(result, ".") <- rev(fvnames(result, ".")) fvnames(result, ".s") <- c("lo", "hi") if(savefuns || savepatterns) result <- hasenvelope(result, envX) return(result) } spatstat.explore/R/density.ppp.R0000644000176200001440000011546114611073310016377 0ustar liggesusers# # density.ppp.R # # Method for 'density' for point patterns # # $Revision: 1.136 $ $Date: 2024/01/29 07:25:10 $ # # ksmooth.ppp <- function(x, sigma, ..., edge=TRUE) { # .Deprecated("density.ppp", package="spatstat") # density.ppp(x, sigma, ..., edge=edge) # } density.ppp <- local({ density.ppp <- function(x, sigma=NULL, ..., weights=NULL, edge=TRUE, varcov=NULL, at="pixels", leaveoneout=TRUE, adjust=1, diggle=FALSE, se=FALSE, wtype=c("value", "multiplicity"), kernel="gaussian", scalekernel=is.character(kernel), positive=FALSE, verbose=TRUE, sameas) { verifyclass(x, "ppp") output <- pickoption("output location type", at, c(pixels="pixels", points="points")) ## .............. bandwidth ......................................... if(!missing(sameas) && !is.null(kerdata <- attr(sameas, "kerdata"))) { ## copy smoothing parameters from 'sameas' kernel <- kerdata$kernel %orifnull% kernel sigma <- kerdata$sigma varcov <- kerdata$varcov cutoff <- kerdata$cutoff scalekernel <- kerdata$scalekernel %orifnull% is.character(kernel) adjust <- 1 } else { ## usual case: determine the smoothing parameters from the call if(!identical(kernel, "gaussian")) { validate2Dkernel(kernel) if(verbose && scalekernel && (is.function(sigma) || (is.null(sigma) && is.null(varcov)))) warning("Bandwidth selection will be based on Gaussian kernel") } kerdata <- resolve.2D.kernel(..., sigma=sigma, varcov=varcov, x=x, adjust=adjust) sigma <- kerdata$sigma varcov <- kerdata$varcov kerdata$kernel <- kernel } ## Full set of smoothing attributes to be saved in the result ## using the idiom ## attributes(result)[names(remember)] <- remember remember <- list(sigma=sigma, varcov=varcov, kernel=kernel, kerdata=kerdata) ## ............. weird trivial case .............................. if(any(sidelengths(Frame(x)) == 0)) { ## pixels will have zero area val <- npoints(x)/0 # Inf or NaN result <- as.im(val, W=Frame(x), ...) attributes(result)[names(remember)] <- remember return(result) } ## ............... weights ........................................... weights <- pointweights(x, weights=weights, parent=parent.frame(), dfok=TRUE) ## ............... standard error .................................... if(se) { ## compute standard error wtype <- match.arg(wtype) SE <- denspppSEcalc(x, sigma=sigma, varcov=varcov, kernel=kernel, ..., weights=weights, wtype=wtype, edge=edge, at=output, leaveoneout=leaveoneout, adjust=adjust, diggle=diggle) if(positive) SE <- posify(SE) } ## ............... infinite bandwidth .................................... if(bandwidth.is.infinite(sigma)) { #' uniform estimate nx <- npoints(x) single <- is.null(dim(weights)) totwt <- if(is.null(weights)) nx else if(single) sum(weights) else colSums(weights) if(!edge) totwt <- 0 * totwt W <- Window(x) A <- area.owin(W) switch(output, pixels = { E <- solapply(totwt/A, as.im, W=W, ...) 
names(E) <- colnames(weights) if(single) E <- E[[1L]] }, points = { numerator <- rep(totwt, each=nx) if(!single) numerator <- matrix(numerator, nrow=nx) if(leaveoneout && edge) numerator <- numerator - (weights %orifnull% 1) E <- numerator/A if(!single) colnames(E) <- colnames(weights) }) result <- if(se) list(estimate=E, SE=SE) else E attributes(result)[names(remember)] <- remember return(result) } if(output == "points") { # VALUES AT DATA POINTS ONLY result <- densitypointsEngine(x, sigma, varcov=varcov, kernel=kernel, scalekernel=scalekernel, weights=weights, edge=edge, leaveoneout=leaveoneout, diggle=diggle, ...) if(verbose && !is.null(uhoh <- attr(result, "warnings"))) { switch(uhoh, underflow=warning("underflow due to very small bandwidth"), warning(uhoh)) } ## constrain values to be positive if(positive) result <- posify(result) if(se) result <- list(estimate=result, SE=SE) attributes(result)[names(remember)] <- remember return(result) } # VALUES AT PIXELS if(!edge) { # no edge correction edg <- NULL raw <- second.moment.calc(x, sigma, what="smooth", ..., kernel=kernel, scalekernel=scalekernel, weights=weights, varcov=varcov) raw <- divide.by.pixelarea(raw) smo <- raw } else if(!diggle) { # edge correction e(u) both <- second.moment.calc(x, sigma, what="smoothedge", ..., kernel=kernel, scalekernel=scalekernel, weights=weights, varcov=varcov) raw <- divide.by.pixelarea(both$smooth) edg <- both$edge ## Math.im / Math.imlist not yet working smo <- imagelistOp(raw, edg, "/") } else { # edge correction e(x_i) edg <- second.moment.calc(x, sigma, what="edge", ..., scalekernel=scalekernel, kernel=kernel, varcov=varcov) wi <- 1/safelookup(edg, x, warn=FALSE) wi[!is.finite(wi)] <- 0 # edge correction becomes weight attached to points if(is.null(weights)) { newweights <- wi } else if(is.matrix(weights) || is.data.frame(weights)) { stopifnot(nrow(weights) == npoints(x)) newweights <- weights * wi } else { stopifnot(length(weights) == npoints(x)) newweights <- weights * wi } raw <- second.moment.calc(x, sigma, what="smooth", ..., kernel=kernel, scalekernel=scalekernel, weights=newweights, varcov=varcov) raw <- divide.by.pixelarea(raw) smo <- raw } result <- if(is.im(smo)) smo[x$window, drop=FALSE] else solapply(smo, "[", i=x$window, drop=FALSE) # internal use only spill <- resolve.1.default(list(spill=FALSE), list(...)) if(spill) return(list(result=result, sigma=sigma, varcov=varcov, raw = raw, edg=edg, remember=remember)) # constrain values to be positive if(positive) result <- posify(result) # normal return attributes(result)[names(remember)] <- remember if(se) { result <- list(estimate=result, SE=SE) attributes(result)[names(remember)] <- remember } return(result) } divideimage <- function(numer, denom) eval.im(numer/denom) posify <- function(x, eps=.Machine$double.xmin) { force(eps) # scalpel if(is.im(x)) return(eval.im(pmax(eps, x))) if(inherits(x, "solist")) return(solapply(x, posify, eps=eps)) if(is.numeric(x)) return(pmax(eps, x)) # data frame or list if(is.list(x) && all(sapply(x, is.numeric))) return(lapply(x, posify, eps=eps)) warning("Internal error: posify did not recognise data format") return(x) } divide.by.pixelarea <- function(x) { if(is.im(x)) { x$v <- x$v/(x$xstep * x$ystep) } else { for(i in seq_along(x)) x[[i]]$v <- with(x[[i]], v/(xstep * ystep)) } return(x) } denspppSEcalc <- function(x, sigma, varcov, ..., kernel, weights, wtype, edge, diggle, at, leaveoneout=TRUE, gauss.is.special=TRUE, debug=FALSE) { ## Calculate standard error, rather than estimate nx <- npoints(x) 
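## 'single' is TRUE when 'weights' is NULL or a plain vector,
## FALSE when it is a matrix or data frame of weight columns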
single <- is.null(dim(weights)) weightspower <- if(is.null(weights)) NULL else switch(wtype, value = weights^2, multiplicity = weights) if(!is.null(weights) && wtype == "multiplicity" && min(weights) < 0) stop("Negative weights are not permitted when wtype='multiplicity'", call.=FALSE) if(bandwidth.is.infinite(sigma)) { #' special case - uniform totwtpower <- if(is.null(weights)) nx else if(single) sum(weightspower) else colSums(weightspower) if(!edge) { ## infinite bandwidth without edge correction: estimate = variance = 0 totwtpower <- 0 * totwtpower } W <- Window(x) A <- area.owin(W) switch(at, pixels = { V <- solapply(totwtpower/A, as.im, W=W, ...) names(V) <- colnames(weights) if(single) V <- V[[1L]] }, points = { numerator <- rep(totwtpower, each=nx) if(!single) numerator <- matrix(numerator, nrow=nx) if(edge && leaveoneout) numerator <- numerator - (weightspower %orifnull% 1) V <- numerator/A if(!single) colnames(V) <- colnames(weights) }) return(sqrt(V)) } ## Usual case: sigma or vcov is finite ## Calculations involve the squared kernel specialGauss <- gauss.is.special && identical(kernel, "gaussian") if(!specialGauss) { ## The square of the kernel will be computed inside second.moment.engine kerpow <- 2 tau <- sigma taumat <- varcov varconst <- 1 } else { ## Use the fact that the square of the Gaussian kernel ## is a rescaled Gaussian kernel kerpow <- 1 tau <- taumat <- NULL if(is.null(varcov)) { varconst <- 1/(4 * pi * prod(ensure2vector(sigma))) tau <- sigma/sqrt(2) } else { varconst <- 1/(4 * pi * sqrt(det(varcov))) taumat <- varcov/2 } } ## Calculate edge correction weights if(edge) { ## convolution of kernel with window edgeim <- second.moment.calc(x, sigma, what="edge", ..., varcov=varcov) if(diggle || at == "points") { edgeX <- safelookup(edgeim, x, warn=FALSE) invmassX <- 1/edgeX invmassX[!is.finite(invmassX)] <- 0 } edgeim <- edgeim[Window(x), drop=FALSE] } ## Calculate variance of sum of weighted contributions dataVarianceWeights <- if(!edge) { ## no edge correction weightspower } else if(!diggle) { ## uniform edge correction e(u) weightspower } else { ## Jones-Diggle edge correction e(x_i) if(is.null(weightspower)) invmassX^2 else (weightspower * invmassX^2) } V <- density(x, sigma=tau, varcov=taumat, ..., kerpow=kerpow, weights=dataVarianceWeights, at=at, leaveoneout=leaveoneout, edge=FALSE, diggle=FALSE) if(edge && !diggle) { ## uniform edge correction e(u): rescale V <- if(at == "points") V * invmassX^2 else imagelistOp(V, edgeim^2, "/") } if(varconst != 1) V <- V * varconst return(sqrt(V)) } density.ppp }) densitypointsEngine <- function(x, sigma=NULL, ..., kernel="gaussian", scalekernel=is.character(kernel), kerpow = 1, weights=NULL, edge=TRUE, varcov=NULL, leaveoneout=TRUE, diggle=FALSE, sorted=FALSE, spill=FALSE, cutoff=NULL, debug=FALSE) { stopifnot(is.logical(leaveoneout)) validate2Dkernel(kernel) if(is.character(kernel)) kernel <- match2DkernelName(kernel) isgauss <- identical(kernel, "gaussian") nx <- npoints(x) if(isgauss) { ## constant factor in Gaussian density if(is.null(varcov)) { gaussconst <- 1/(2 * pi * sigma^2) } else { detSigma <- det(varcov) Sinv <- solve(varcov) gaussconst <- 1/(2 * pi * sqrt(detSigma)) } } if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0)) weights <- NULL ## infinite bandwidth if(bandwidth.is.infinite(sigma)) { #' uniform estimate single <- is.null(dim(weights)) totwt <- if(is.null(weights)) nx else if(single) sum(weights) else colSums(weights) if(!edge) totwt <- 0 * totwt W <- Window(x) A <- area.owin(W) 
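## uniform intensity estimate: total weight divided by window area;
## with leave-one-out, each point's own weight is removed from the numerator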
numerator <- rep(totwt, each=nx) if(!single) numerator <- matrix(numerator, nrow=nx) if(leaveoneout && edge) numerator <- numerator - (weights %orifnull% 1) result <- numerator/A if(!single) colnames(result) <- colnames(weights) return(result) } ## cutoff distance (beyond which the kernel value is treated as zero) ## NB: input argument 'cutoff' is either NULL or ## an absolute distance (if scalekernel=FALSE) ## a number of standard deviations (if scalekernel=TRUE) cutoff <- cutoff2Dkernel(kernel, sigma=sigma, varcov=varcov, scalekernel=scalekernel, cutoff=cutoff, fatal=TRUE) ## cutoff is now an absolute distance if(debug) cat(paste("cutoff=", cutoff, "\n")) if(leaveoneout && nx > 1) { ## ensure each point has its closest neighbours within the cutoff nndmax <- maxnndist(x) cutoff <- max(2 * nndmax, cutoff) if(debug) cat(paste("adjusted cutoff=", cutoff, "\n")) } # validate weights if(is.null(weights)) { k <- 1L } else if(is.matrix(weights) || is.data.frame(weights)) { k <- ncol(weights) stopifnot(nrow(weights) == npoints(x)) weights <- as.data.frame(weights) weightnames <- colnames(weights) } else { k <- 1L stopifnot(length(weights) == npoints(x) || length(weights) == 1L) if(length(weights) == 1L) weights <- rep(weights, nx) } # evaluate edge correction weights at points if(edge) { win <- x$window if(isgauss && is.null(varcov) && win$type == "rectangle") { # evaluate Gaussian probabilities directly xr <- win$xrange yr <- win$yrange xx <- x$x yy <- x$y xprob <- pnorm(xr[2L], mean=xx, sd=sigma) - pnorm(xr[1L], mean=xx, sd=sigma) yprob <- pnorm(yr[2L], mean=yy, sd=sigma) - pnorm(yr[1L], mean=yy, sd=sigma) edgeweight <- xprob * yprob } else { edg <- second.moment.calc(x, sigma=sigma, kernel=kernel, scalekernel=scalekernel, what="edge", varcov=varcov, ...) edgeweight <- safelookup(edg, x, warn=FALSE) } if(diggle) { # Diggle edge correction # edgeweight is attached to each point if(is.null(weights)) { k <- 1L weights <- 1/edgeweight } else { weights <- weights/edgeweight } } } if(isgauss && spatstat.options("densityTransform") && spatstat.options("densityC")) { ## .................. experimental C code ..................... 
if(debug) cat('Using experimental code G*denspt\n') npts <- npoints(x) result <- if(k == 1L) numeric(npts) else matrix(, npts, k) xx <- x$x yy <- x$y gaussconstpow <- gaussconst^kerpow ## transform to standard coordinates if(is.null(varcov)) { sigroot2 <- sqrt(2/kerpow) * sigma xx <- xx/sigroot2 yy <- yy/sigroot2 } else { xy <- cbind(xx, yy) %*% matrixsqrt(Sinv * (kerpow/2)) xx <- xy[,1L] yy <- xy[,2L] sorted <- FALSE } ## cutoff in standard coordinates sd <- sigma %orifnull% sqrt(min(eigen(varcov)$values)) sdscale <- sqrt(2/kerpow) * sd cutoff <- cutoff/sdscale ## sort into increasing order of x coordinate (required by C code) if(!sorted) { oo <- fave.order(xx) xx <- xx[oo] yy <- yy[oo] } if(is.null(weights)) { zz <- .C(SE_Gdenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), result = as.double(double(npts)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[oo] <- zz$result result <- result * gaussconstpow } else if(k == 1L) { wtsort <- if(sorted) weights else weights[oo] zz <- .C(SE_Gwtdenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), weight = as.double(wtsort), result = as.double(double(npts)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[oo] <- zz$result result <- result * gaussconstpow } else { ## matrix of weights wtsort <- if(sorted) weights else weights[oo, ] for(j in 1:k) { zz <- .C(SE_Gwtdenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), weight = as.double(wtsort[,j]), result = as.double(double(npts)), PACKAGE="spatstat.explore") if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result } result <- result * gaussconstpow } } else if(isgauss && (kerpow %in% c(1,2)) && spatstat.options("densityC")) { # .................. C code ........................... 
if(debug) cat('Using standard C code *denspt.\n') npts <- npoints(x) result <- if(k == 1L) numeric(npts) else matrix(, npts, k) squared <- (kerpow == 2) if(squared && debug) cat('Squared kernel.\n') # sort into increasing order of x coordinate (required by C code) if(sorted) { xx <- x$x yy <- x$y } else { oo <- fave.order(x$x) xx <- x$x[oo] yy <- x$y[oo] } if(is.null(varcov)) { # isotropic kernel if(is.null(weights)) { zz <- .C(SE_denspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), sig = as.double(sigma), squared = as.integer(squared), result = as.double(double(npts)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[oo] <- zz$result } else if(k == 1L) { wtsort <- if(sorted) weights else weights[oo] zz <- .C(SE_wtdenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), sig = as.double(sigma), weight = as.double(wtsort), squared = as.integer(squared), result = as.double(double(npts)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[oo] <- zz$result } else { # matrix of weights wtsort <- if(sorted) weights else weights[oo, ] for(j in 1:k) { zz <- .C(SE_wtdenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), sig = as.double(sigma), weight = as.double(wtsort[,j]), squared = as.integer(squared), result = as.double(double(npts)), PACKAGE="spatstat.explore") if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result } } } else { # anisotropic kernel flatSinv <- as.vector(t(Sinv)) if(is.null(weights)) { zz <- .C(SE_adenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), squared = as.integer(squared), result = as.double(double(npts)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[oo] <- zz$result } else if(k == 1L) { # vector of weights wtsort <- if(sorted) weights else weights[oo] zz <- .C(SE_awtdenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), weight = as.double(wtsort), squared = as.integer(squared), result = as.double(double(npts)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[oo] <- zz$result } else { # matrix of weights wtsort <- if(sorted) weights else weights[oo, ] for(j in 1:k) { zz <- .C(SE_awtdenspt, nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), weight = as.double(wtsort[,j]), squared = as.integer(squared), result = as.double(double(npts)), PACKAGE="spatstat.explore") if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result } } } } else { # ..... interpreted code ......................................... if(debug) cat('Using interpreted code.\n') close <- closepairs(x, cutoff) i <- close$i j <- close$j d <- close$d npts <- npoints(x) result <- if(k == 1L) numeric(npts) else matrix(, npts, k) # evaluate contribution from each close pair (i,j) if(isgauss) { if(is.null(varcov)) { contrib <- gaussconst * exp(-d^2/(2 * sigma^2)) } else { ## anisotropic kernel dx <- close$dx dy <- close$dy contrib <- gaussconst * exp(-(dx * (dx * Sinv[1L,1L] + dy * Sinv[1L,2L]) + dy * (dx * Sinv[2L,1L] + dy * Sinv[2L,2L]))/2) } } else { contrib <- evaluate2Dkernel(kernel, close$dx, close$dy, sigma=sigma, varcov=varcov, scalekernel=scalekernel, ...) 
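## 'contrib' holds the kernel value for each close pair (i,j),
## evaluated at its displacement (dx, dy)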
} ## raise kernel density value to a power (for variance calculations etc) if(kerpow != 1) { if(debug) { if(kerpow == 2) cat('Squaring the kernel values\n') else splat('Raising kernel values to', ordinal(kerpow), 'power') } contrib <- contrib^kerpow } ## sum (weighted) contributions ## query point i, data point j ifac <- factor(i, levels=1:npts) if(is.null(weights)) { result <- tapplysum(contrib, list(ifac)) } else if(k == 1L) { wcontrib <- contrib * weights[j] result <- tapplysum(wcontrib, list(ifac)) } else { for(kk in 1:k) { wcontribkk <- contrib * weights[j, kk] result[,kk] <- tapplysum(wcontribkk, list(ifac)) } } # } # ----- contribution from point itself ---------------- if(!leaveoneout) { #' add contribution from point itself if(isgauss) { self <- gaussconst } else { self <- evaluate2Dkernel(kernel, 0, 0, sigma=sigma, varcov=varcov, scalekernel=scalekernel, ...) } ## raise kernel density value to a power (for variance calculations etc) if(kerpow != 1) self <- self^kerpow ## weighted if(!is.null(weights)) self <- self * weights result <- result + self } # ........ Edge correction ........................................ if(edge && !diggle) result <- result/edgeweight # ............. validate ................................. npts <- npoints(x) if(k == 1L) { result <- as.numeric(result) if(length(result) != npts) stop(paste("Internal error: incorrect number of lambda values", "in leave-one-out method:", "length(lambda) = ", length(result), "!=", npts, "= npoints")) if(anyNA(result)) { nwrong <- sum(is.na(result)) stop(paste("Internal error:", nwrong, "NA or NaN", ngettext(nwrong, "value", "values"), "generated in leave-one-out method")) } } else { if(ncol(result) != k) stop(paste("Internal error: incorrect number of columns returned:", ncol(result), "!=", k)) colnames(result) <- weightnames if(nrow(result) != npts) stop(paste("Internal error: incorrect number of rows of lambda values", "in leave-one-out method:", "nrow(lambda) = ", nrow(result), "!=", npts, "= npoints")) if(anyNA(result)) { nwrong <- sum(!complete.cases(result)) stop(paste("Internal error:", nwrong, ngettext(nwrong, "row", "rows"), "of NA values generated in leave-one-out method")) } } if(spill) return(list(result=result, sigma=sigma, varcov=varcov, edg=edgeweight)) # tack on bandwidth attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov attr(result, "kerpow") <- kerpow # return(result) } resolve.2D.kernel <- function(..., sigma=NULL, varcov=NULL, x, mindist=NULL, adjust=1, bwfun=NULL, allow.zero=FALSE) { if(is.function(sigma)) { bwfun <- sigma sigma <- NULL } if(is.null(sigma) && is.null(varcov) && !is.null(bwfun)) { #' call bandwidth selection function force(x) bw <- do.call.matched(bwfun, resolve.defaults(list(X=quote(x)), list(...))) #' interpret the result as either sigma or varcov if(inherits(bw, "bw.optim")) { ## bw is an object representing an optimised scalar tem <- attr(bw, "template") if(is.null(tem)) { ## usual case: interpret bw as a scalar bandwidth bw <- as.numeric(bw) } else { ## interpret as a scale multiple of the template (usually a matrix) h <- as.numeric(bw) exponent <- attr(bw, "exponent") %orifnull% 1 bw <- (h^exponent) * tem } } #' now process 'bw' as a numeric object if(!is.numeric(bw)) stop("bandwidth selector returned a non-numeric result") if(length(bw) %in% c(1L,2L)) { sigma <- as.numeric(bw) if(!all(sigma > 0)) { gripe <- "bandwidth selector returned negative value(s)" if(allow.zero) warning(gripe) else stop(gripe) } } else if(is.matrix(bw) && nrow(bw) == 2 && ncol(bw) == 2) { 
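## a 2 x 2 numeric matrix is interpreted as the variance-covariance
## (bandwidth) matrix of the smoothing kernel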
varcov <- bw if(!all(eigen(varcov)$values > 0)) stop("bandwidth selector returned matrix with negative eigenvalues") } else stop("bandwidth selector did not return a matrix or numeric value") } sigma.given <- !is.null(sigma) varcov.given <- !is.null(varcov) if(sigma.given) { stopifnot(is.numeric(sigma)) stopifnot(length(sigma) %in% c(1L,2L)) if(!allow.zero) stopifnot(all(sigma > 0)) } if(varcov.given) stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov)==2 ) # reconcile ngiven <- varcov.given + sigma.given switch(ngiven+1L, { # default w <- x$window sigma <- (1/8) * shortside(as.rectangle(w)) }, { if(sigma.given && length(sigma) == 2) varcov <- diag(sigma^2) if(!is.null(varcov)) sigma <- NULL }, { stop(paste("Give only one of the arguments", sQuote("sigma"), "and", sQuote("varcov"))) }) # apply adjustments if(!is.null(sigma)) sigma <- adjust * sigma if(!is.null(varcov)) varcov <- (adjust^2) * varcov # sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov))) cutoff <- 8 * sd uhoh <- if(!is.null(mindist) && cutoff < mindist) "underflow" else NULL result <- list(sigma=sigma, varcov=varcov, cutoff=cutoff, warnings=uhoh) return(result) } densitycrossEngine <- function(Xdata, Xquery, sigma=NULL, ..., kernel="gaussian", scalekernel=is.character(kernel), weights=NULL, edge=TRUE, varcov=NULL, diggle=FALSE, sorted=FALSE, cutoff=NULL, se=FALSE, kerpow=1) { validate2Dkernel(kernel) if(is.character(kernel)) kernel <- match2DkernelName(kernel) isgauss <- identical(kernel, "gaussian") && scalekernel if(se) warning("Standard errors are not yet supported", call.=FALSE) if(kerpow != 1) warning("Powers of the kernel are not yet supported", call.=FALSE) if(length(weights) == 0 || (!is.null(dim(weights)) && nrow(weights) == 0)) weights <- NULL # validate weights if(is.null(weights)) { k <- 1L } else if(is.matrix(weights) || is.data.frame(weights)) { k <- ncol(weights) stopifnot(nrow(weights) == npoints(Xdata)) weights <- as.data.frame(weights) weightnames <- colnames(weights) } else { k <- 1L stopifnot(length(weights) == npoints(Xdata) || length(weights) == 1L) } #' infinite bandwidth if(bandwidth.is.infinite(sigma)) { #' uniform estimate single <- is.null(dim(weights)) totwt <- if(is.null(weights)) npoints(Xdata) else if(single) sum(weights) else colSums(weights) if(!edge) totwt <- 0 * totwt lam <- totwt/area.owin(Window(Xdata)) result <- if(single) rep(lam, npoints(Xquery)) else matrix(lam, npoints(Xquery), length(lam), byrow=TRUE, dimnames=list(NULL, colnames(weights))) return(result) } # evaluate edge correction weights at points if(edge) { win <- Xdata$window if(diggle) { ## edge correction weights are attached to data points xedge <- Xdata } else { ## edge correction weights are applied at query points xedge <- Xquery if(!all(inside.owin(Xquery, , win))) stop(paste("Edge correction is not possible:", "some query points lie outside the data window"), call.=FALSE) } if(isgauss && is.null(varcov) && win$type == "rectangle") { ## evaluate Gaussian probabilities directly xr <- win$xrange yr <- win$yrange xx <- xedge$x yy <- xedge$y xprob <- pnorm(xr[2L], mean=xx, sd=sigma) - pnorm(xr[1L], mean=xx, sd=sigma) yprob <- pnorm(yr[2L], mean=yy, sd=sigma) - pnorm(yr[1L], mean=yy, sd=sigma) edgeweight <- xprob * yprob } else { ## evaluate edge correction weights edg <- second.moment.calc(Xdata, what="edge", kernel=kernel, scalekernel=scalekernel, sigma=sigma, varcov=varcov) edgeweight <- safelookup(edg, xedge, warn=FALSE) } if(diggle) { ## Diggle edge correction ## edgeweight is attached to each data 
point if(is.null(weights)) { k <- 1L weights <- 1/edgeweight } else { weights <- weights/edgeweight } } } ## cutoff distance (beyond which the kernel value is treated as zero) ## NB: input argument 'cutoff' is either NULL or ## an absolute distance (if scalekernel=FALSE) ## a number of standard deviations (if scalekernel=TRUE) cutoff <- cutoff2Dkernel(kernel, sigma=sigma, varcov=varcov, scalekernel=scalekernel, cutoff=cutoff, fatal=TRUE) ## cutoff is now an absolute distance ndata <- npoints(Xdata) nquery <- npoints(Xquery) if(!isgauss) { ## .................. non-Gaussian kernel ........................ close <- crosspairs(Xdata, Xquery, cutoff) contrib <- evaluate2Dkernel(kernel, close$dx, close$dy, sigma=sigma, varcov=varcov, scalekernel=scalekernel, ...) ## sum the (weighted) contributions i <- close$i j <- close$j jfac <- factor(j, levels=seq_len(nquery)) if(is.null(weights)) { result <- tapplysum(contrib, list(jfac)) } else if(k == 1L) { wcontrib <- contrib * weights[i] result <- tapplysum(wcontrib, list(jfac)) } else { result <- matrix(, nquery, k) for(kk in 1:k) { wcontribkk <- contrib * weights[i, kk] result[,kk] <- tapplysum(wcontribkk, list(jfac)) } } } else { ## ................. Gaussian kernel ................... result <- if(k == 1L) numeric(nquery) else matrix(, nquery, k) ## coordinates xq <- Xquery$x yq <- Xquery$y xd <- Xdata$x yd <- Xdata$y if(!sorted) { ## sort into increasing order of x coordinate (required by C code) ooq <- fave.order(Xquery$x) xq <- xq[ooq] yq <- yq[ooq] ood <- fave.order(Xdata$x) xd <- xd[ood] yd <- yd[ood] } if(is.null(varcov)) { ## isotropic kernel if(is.null(weights)) { zz <- .C(SE_crdenspt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), rmaxi = as.double(cutoff), sig = as.double(sigma), squared = as.integer(FALSE), result = as.double(double(nquery)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[ooq] <- zz$result } else if(k == 1L) { wtsort <- if(sorted) weights else weights[ood] zz <- .C(SE_wtcrdenspt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), wd = as.double(wtsort), rmaxi = as.double(cutoff), sig = as.double(sigma), squared = as.integer(FALSE), result = as.double(double(nquery)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[ooq] <- zz$result } else { ## matrix of weights wtsort <- if(sorted) weights else weights[ood, ] for(j in 1:k) { zz <- .C(SE_wtcrdenspt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), wd = as.double(wtsort[,j]), rmaxi = as.double(cutoff), sig = as.double(sigma), squared = as.integer(FALSE), result = as.double(double(nquery)), PACKAGE="spatstat.explore") if(sorted) result[,j] <- zz$result else result[ooq,j] <- zz$result } colnames(result) <- weightnames } } else { ## anisotropic kernel detSigma <- det(varcov) Sinv <- solve(varcov) flatSinv <- as.vector(t(Sinv)) if(is.null(weights)) { zz <- .C(SE_acrdenspt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), squared = as.integer(FALSE), result = as.double(double(nquery)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[ooq] <- zz$result } else if(k == 1L) { ## vector 
of weights wtsort <- if(sorted) weights else weights[ood] zz <- .C(SE_awtcrdenspt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), wd = as.double(wtsort), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), squared = as.integer(FALSE), result = as.double(double(nquery)), PACKAGE="spatstat.explore") if(sorted) result <- zz$result else result[ooq] <- zz$result } else { ## matrix of weights wtsort <- if(sorted) weights else weights[ood, ] for(j in 1:k) { zz <- .C(SE_awtcrdenspt, nquery = as.integer(nquery), xq = as.double(xq), yq = as.double(yq), ndata = as.integer(ndata), xd = as.double(xd), yd = as.double(yd), wd = as.double(wtsort[,j]), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), squared = as.integer(FALSE), result = as.double(double(nquery)), PACKAGE="spatstat.explore") if(sorted) result[,j] <- zz$result else result[ooq,j] <- zz$result } colnames(result) <- weightnames } } } # ........ Edge correction ........................................ if(edge && !diggle) result <- result/edgeweight # tack on bandwidth attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov # return(result) } bandwidth.is.infinite <- function(sigma) { sigma <- as.numeric(sigma) return((length(sigma) > 0) && all(sigma == Inf)) } density.ppplist <- density.splitppp <- function(x, ..., weights=NULL, se=FALSE) { if(is.null(weights) || inherits(weights, c("im", "funxy", "expression"))) weights <- rep(list(weights), length(x)) y <- mapply(density.ppp, x=x, weights=weights, MoreArgs=list(se=se, ...), SIMPLIFY=FALSE) if(!se) return(as.solist(y, demote=TRUE)) y.est <- lapply(y, getElement, name="estimate") y.se <- lapply(y, getElement, name="SE") z <- list(estimate = as.solist(y.est, demote=TRUE), SE = as.solist(y.se, demote=TRUE)) return(z) } spatstat.explore/R/Kinhom.R0000644000176200001440000003626614633203032015354 0ustar liggesusers# # Kinhom.S Estimation of K function for inhomogeneous patterns # # $Revision: 1.104 $ $Date: 2024/06/09 00:00:07 $ # # Kinhom() compute estimate of K_inhom # # # Reference: # Non- and semiparametric estimation of interaction # in inhomogeneous point patterns # A.Baddeley, J.Moller, R.Waagepetersen # Statistica Neerlandica 54 (2000) 329--350. 
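#
# -------- illustrative usage (sketch only; see arguments below) -----------
#
#    lam <- density(X, sigma=bw.diggle(X), at="points", leaveoneout=TRUE)
#    Ki  <- Kinhom(X, lambda=lam, correction="translate")
#    plot(Ki)
#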
# # -------- functions ---------------------------------------- # Kinhom() compute estimate of K # using various edge corrections # # Kwtsum() internal routine for border correction # # -------- standard arguments ------------------------------ # X point pattern (of class 'ppp') # # r distance values at which to compute K # # lambda vector of intensity values for points of X # # -------- standard output ------------------------------ # A data frame (class "fv") with columns named # # r: same as input # # trans: K function estimated by translation correction # # iso: K function estimated by Ripley isotropic correction # # theo: K function for Poisson ( = pi * r ^2 ) # # border: K function estimated by border method # (denominator = sum of weights of points) # # bord.modif: K function estimated by border method # (denominator = area of eroded window) # # ------------------------------------------------------------------------ "Linhom" <- function(X, ..., correction) { if(missing(correction)) correction <- NULL K <- Kinhom(X, ..., correction=correction) L <- eval.fv(sqrt(pmax.int(K,0)/pi)) # relabel the fv object L <- rebadge.fv(L, quote(L[inhom](r)), c("L", "inhom"), names(K), new.labl=attr(K, "labl")) attr(L, "labl") <- attr(K, "labl") attr(L, "dangerous") <- attr(K, "dangerous") # return(L) } "Kinhom"<- function (X, lambda=NULL, ..., r = NULL, breaks = NULL, correction=c("border", "bord.modif", "isotropic", "translate"), renormalise=TRUE, normpower=1, update = TRUE, leaveoneout = TRUE, nlarge = 1000, lambda2=NULL, reciplambda=NULL, reciplambda2=NULL, diagonal=TRUE, sigma=NULL, varcov=NULL, ratio=FALSE) { verifyclass(X, "ppp") nlarge.given <- !missing(nlarge) rfixed <- !missing(r) || !missing(breaks) # determine basic parameters W <- X$window npts <- npoints(X) areaW <- area(W) diamW <- diameter(W) rmaxdefault <- rmax.rule("K", W, npts/areaW) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # match corrections correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "bord.modif", "isotropic", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", good="good", best="best"), multi=TRUE) # best.wanted <- ("best" %in% correction) ## replace 'good' by the optimal choice for this size of dataset if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) ## retain only corrections that are implemented for the window correction <- implemented.for.K(correction, W$type, correction.given) ########################################################### # DETERMINE WEIGHTS AND VALIDATE # # The matrix 'lambda2' or 'reciplambda2' is sufficient information # unless we want the border correction. 
lambda2.given <- !is.null(lambda2) || !is.null(reciplambda2) lambda2.suffices <- !any(correction %in% c("border", "bord.modif")) ## Arguments that are 'dangerous' for envelope, if fixed dangerous <- c("lambda", "reciplambda", "lambda2", "reciplambda2") danger <- TRUE # Use matrix of weights if it was provided and if it is sufficient if(lambda2.suffices && lambda2.given) { if(!is.null(reciplambda2)) { check.nmatrix(reciplambda2, npts, mname="reciplambda2") validate.weights(reciplambda2, recip=TRUE) } else { check.nmatrix(lambda2, npts, mname="lambda2") validate.weights(lambda2) reciplambda2 <- 1/lambda2 } # renormalise if(renormalise && npts > 0) { check.1.real(normpower) stopifnot(normpower %in% 1:2) rlam2 <- reciplambda2 if(!diagonal) diag(rlam2) <- 0 renorm.factor <- (areaW^2/sum(rlam2))^(normpower/2) } } else { ## Vector lambda or reciplambda is required a <- resolve.reciplambda(X, lambda=lambda, reciplambda=reciplambda, ..., sigma=sigma, varcov=varcov, leaveoneout=leaveoneout, update=update, check=TRUE) reciplambda <- a$reciplambda danger <- a$danger dangerous <- a$dangerous # renormalise if(renormalise && npts > 0) { check.1.real(normpower) stopifnot(normpower %in% 1:2) if(!diagonal && normpower == 2) { renorm.factor <- (areaW^2)/(sum(reciplambda)^2 - sum(reciplambda^2)) } else { renorm.factor <- (areaW/sum(reciplambda))^normpower } } } # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) ########################################### # Efficient code for border correction and no correction # Usable only if r values are evenly spaced from 0 to rmax # Invoked automatically if number of points is large can.do.fast <- breaks$even && !lambda2.given large.n <- (npts >= nlarge) # demand.best <- correction.given && best.wanted large.n.trigger <- large.n && !correction.given fastcorrections <- c("border", "bord.modif", "none") fastdefault <- "border" correction.fast <- all(correction %in% fastcorrections) will.do.fast <- can.do.fast && (correction.fast || large.n.trigger) asked.fast <- (correction.given && correction.fast) || (nlarge.given && large.n.trigger) if(!can.do.fast && asked.fast) { whynot <- if(!(breaks$even)) "r values not evenly spaced" else if(!missing(lambda)) "matrix lambda2 was given" else NULL warning(paste("cannot use efficient code", whynot, sep="; ")) } if(will.do.fast) { ## Compute Kinhom using fast algorithm(s) ## determine correction(s) ok <- correction %in% fastcorrections correction <- if(any(ok)) correction[ok] else fastdefault bord <- any(correction %in% c("border", "bord.modif")) none <- any(correction =="none") if(!all(ok)) { ## some corrections were overridden; notify user corx <- c(if(bord) "border correction estimate" else NULL, if(none) "uncorrected estimate" else NULL) corx <- paste(corx, collapse=" and ") message(paste("number of data points exceeds", nlarge, "- computing", corx , "only")) } ## restrict r values to recommended range, unless specifically requested if(!rfixed) r <- seq(from=0, to=alim[2], length.out=length(r)) ## border method if(bord) { Kb <- Kborder.engine(X, max(r), length(r), correction, weights=reciplambda, ratio=ratio) if(renormalise) { ynames <- setdiff(fvnames(Kb, "*"), "theo") Kb <- adjust.ratfv(Kb, ynames, denfactor=1/renorm.factor) } Kb <- tweak.ratfv.entry(Kb, "border", new.labl="{hat(%s)[%s]^{bord}} (r)") Kb <- tweak.ratfv.entry(Kb, "bord.modif", new.labl="{hat(%s)[%s]^{bordm}} (r)") } ## uncorrected if(none) { Kn <- Knone.engine(X, max(r), length(r), weights=reciplambda, ratio=ratio) if(renormalise) Kn <- 
adjust.ratfv(Kn, "un", denfactor=1/renorm.factor) Kn <- tweak.ratfv.entry(Kn, "un", new.labl="{hat(%s)[%s]^{un}} (r)") } K <- if(bord && !none) Kb else if(!bord && none) Kn else if(!ratio) cbind.fv(Kb, Kn[, c("r", "un")]) else bind.ratfv(Kb, Kn[, c("r", "un")], ratio=TRUE) ## tweak labels K <- rebadge.fv(K, quote(K[inhom](r)), c("K", "inhom")) if(danger) attr(K, "dangerous") <- dangerous return(K) } ########################################### # Fast code for rectangular window ########################################### if(can.do.fast && is.rectangle(W) && spatstat.options("use.Krect")) { K <- Krect.engine(X, rmax, length(r), correction, weights=reciplambda, ratio=ratio, fname=c("K", "inhom")) if(renormalise) { allfun <- setdiff(fvnames(K, "*"), "theo") K <- adjust.ratfv(K, allfun, denfactor=1/renorm.factor) } K <- rebadge.fv(K, quote(K[inhom](r)), c("K", "inhom")) attr(K, "alim") <- alim if(danger) attr(K, "dangerous") <- dangerous return(K) } ########################################### # Slower code ########################################### # this will be the output data frame K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") denom <- if(renormalise) (areaW / renorm.factor) else areaW K <- ratfv(K, NULL, denom, argu="r", ylab=quote(K[inhom](r)), valu="theo", fmla=NULL, alim=alim, labl=c("r","{%s[%s]^{pois}}(r)"), desc=desc, fname=c("K", "inhom"), ratio=ratio) # identify all close pairs rmax <- max(r) what <- if(any(correction == "translate")) "all" else "ijd" close <- closepairs(X, rmax, what=what) dIJ <- close$d # compute weights for these pairs I <- close$i J <- close$j # wI <- reciplambda[I] wIJ <- if(!lambda2.given) reciplambda[I] * reciplambda[J] else reciplambda2[cbind(I,J)] # # compute edge corrected estimates if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(X) bI <- b[I] # apply reduced sample algorithm RS <- Kwtsum(dIJ, bI, wIJ, b, w=reciplambda, breaks) if(any(correction == "border")) { Kb <- RS$ratio if(renormalise) Kb <- Kb * renorm.factor K <- bind.ratfv(K, quotient = data.frame(border=Kb), denominator = denom, labl = "{hat(%s)[%s]^{bord}}(r)", desc = "border-corrected estimate of %s", preferred = "border", ratio=ratio) } if(any(correction == "bord.modif")) { Kbm <- RS$numerator/eroded.areas(W, r) if(renormalise) Kbm <- Kbm * renorm.factor K <- bind.ratfv(K, quotient = data.frame(bord.modif=Kbm), denominator = denom, labl = "{hat(%s)[%s]^{bordm}}(r)", desc = "modified border-corrected estimate of %s", preferred = "bord.modif", ratio=ratio) } } if(any(correction == "translate")) { # translation correction edgewt <- edge.Trans(dx=close$dx, dy=close$dy, W=W, paired=TRUE) allweight <- edgewt * wIJ wh <- whist(dIJ, breaks$val, allweight) Ktrans <- cumsum(wh)/areaW if(renormalise) Ktrans <- Ktrans * renorm.factor rmax <- diamW/2 Ktrans[r >= rmax] <- NA K <- bind.ratfv(K, quotient = data.frame(trans=Ktrans), denominator = denom, labl ="{hat(%s)[%s]^{trans}}(r)", desc = "translation-correction estimate of %s", preferred = "trans", ratio=ratio) } if(any(correction == "isotropic" | correction == "Ripley")) { # Ripley isotropic correction edgewt <- edge.Ripley(X[I], matrix(dIJ, ncol=1)) allweight <- edgewt * wIJ wh <- whist(dIJ, breaks$val, allweight) Kiso <- cumsum(wh)/areaW if(renormalise) Kiso <- Kiso * renorm.factor rmax <- diamW/2 Kiso[r >= rmax] <- NA K <- bind.ratfv(K, quotient = data.frame(iso=Kiso), denominator = denom, labl = 
"{hat(%s)[%s]^{iso}}(r)", desc = "Ripley isotropic correction estimate of %s", preferred = "iso", ratio=ratio) } # default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) if(danger) attr(K, "dangerous") <- dangerous return(K) } Kwtsum <- function(dIJ, bI, wIJ, b, w, breaks, fatal=TRUE) { # # "internal" routine to compute border-correction estimates of Kinhom # # dIJ: vector containing pairwise distances for selected I,J pairs # bI: corresponding vector of boundary distances for I # wIJ: product weight for selected I, J pairs # # b: vector of ALL distances to window boundary # w: weights for ALL points # # breaks : breakpts object # stopifnot(length(dIJ) == length(bI)) stopifnot(length(bI) == length(wIJ)) stopifnot(length(w) == length(b)) if(!is.finite(sum(w, wIJ))) { if(fatal) stop("Weights in K-function were infinite or NA", call.=FALSE) #' set non-finite weights to zero if(any(bad <- !is.finite(w))) { warning(paste(sum(bad), "out of", length(bad), paren(percentage(bad)), "of the boundary weights", "in the K-function were NA or NaN or Inf", "and were reset to zero"), call.=FALSE) w[bad] <- 0 } if(any(bad <- !is.finite(wIJ))) { warning(paste(sum(bad), "out of", length(bad), paren(percentage(bad)), "of the weights for pairwise distances", "in the K-function were NA or NaN or Inf", "and were reset to zero"), call.=FALSE) wIJ[bad] <- 0 } } bkval <- breaks$val # determine which distances d_{ij} were observed without censoring uncen <- (dIJ <= bI) # # histogram of noncensored distances nco <- whist(dIJ[uncen], bkval, wIJ[uncen]) # histogram of censoring times for noncensored distances ncc <- whist(bI[uncen], bkval, wIJ[uncen]) # histogram of censoring times (yes, this is a different total size) cen <- whist(b, bkval, w) # total weight of censoring times beyond rightmost breakpoint uppercen <- sum(w[b > breaks$max]) # go RS <- reduced.sample(nco, cen, ncc, show=TRUE, uppercen=uppercen) # extract results numerator <- RS$numerator denominator <- RS$denominator ratio <- RS$numerator/RS$denominator # check if(length(numerator) != breaks$ncells) stop("internal error: length(numerator) != breaks$ncells") if(length(denominator) != breaks$ncells) stop("internal error: length(denom.count) != breaks$ncells") return(list(numerator=numerator, denominator=denominator, ratio=ratio)) } spatstat.explore/R/plot.fv.R0000644000176200001440000007112514611073310015510 0ustar liggesusers# # plot.fv.R (was: conspire.S) # # $Revision: 1.143 $ $Date: 2024/02/04 08:04:51 $ # # # conspire <- function(...) { # .Deprecated("plot.fv", package="spatstat") # plot.fv(...) 
# } plot.fv <- local({ hasonlyone <- function(x, amongst) { sum(all.vars(parse(text=x)) %in% amongst) == 1 } extendifvector <- function(a, n, nmore) { if(is.null(a)) return(a) if(length(a) == 1) return(a) return(c(a, rep(a[1], nmore))) } fixit <- function(a, n, a0, a00) { # 'a' is formal argument # 'a0' and 'a00' are default and fallback default # 'n' is number of values required if(is.null(a)) a <- if(!is.null(a0)) a0 else a00 if(length(a) == 1) return(rep.int(a, n)) else if(length(a) != n) stop(paste("Length of", short.deparse(substitute(a)), "does not match number of curves to be plotted")) else return(a) } pow10 <- function(x) { 10^x } clip.to.usr <- function(xlogscale=FALSE, ylogscale=FALSE) { usr <- par('usr') if(xlogscale) usr[1:2] <- 10^(usr[1:2]) if(ylogscale) usr[3:4] <- 10^(usr[3:4]) clip(usr[1], usr[2], usr[3], usr[4]) } plot.fv <- function(x, fmla, ..., subset=NULL, lty=NULL, col=NULL, lwd=NULL, xlim=NULL, ylim=NULL, xlab=NULL, ylab=NULL, clip.xlim=TRUE, ylim.covers=NULL, legend=!add, legendpos="topleft", legendavoid=missing(legendpos), legendmath=TRUE, legendargs=list(), shade=fvnames(x, ".s"), shadecol="grey", add=FALSE, log="", mathfont=c("italic", "plain", "bold", "bolditalic"), limitsonly=FALSE) { xname <- if(is.language(substitute(x))) short.deparse(substitute(x)) else "" force(legendavoid) if(is.null(legend)) legend <- !add mathfont <- match.arg(mathfont) verifyclass(x, "fv") env.user <- parent.frame() indata <- as.data.frame(x) if(missing(log) && add) { ## determine 'log' from current plot xlogscale <- par('xlog') ylogscale <- par('ylog') log <- paste(c("x","y")[c(xlogscale, ylogscale)], collapse="") } else { xlogscale <- (log %in% c("x", "xy", "yx")) ylogscale <- (log %in% c("y", "xy", "yx")) } ## ---------------- determine plot formula ---------------- defaultplot <- missing(fmla) || is.null(fmla) if(defaultplot) fmla <- formula(x) ## This *is* the last possible moment, so... fmla <- as.formula(fmla, env=env.user) lhs.is.dot <- identical(lhs.of.formula(fmla), as.symbol('.')) ## validate the variable names vars <- variablesinformula(fmla) reserved <- c(".", ".x", ".y", ".a", ".s") external <- !(vars %in% c(colnames(x), reserved)) if(any(external)) { sought <- vars[external] found <- unlist(lapply(sought, exists, envir=env.user, mode="numeric")) if(any(!found)) { nnot <- sum(!found) stop(paste(ngettext(nnot, "Variable", "Variables"), commasep(sQuote(sought[!found])), ngettext(nnot, "was", "were"), "not found")) } else { ## validate the found variables externvars <- lapply(sought, get, envir=env.user) isnum <- sapply(externvars, is.numeric) len <- lengths(externvars) ok <- isnum & (len == 1 | len == nrow(x)) if(!all(ok)) { nnot <- sum(!ok) stop(paste(ngettext(nnot, "Variable", "Variables"), commasep(sQuote(sought[!ok])), ngettext(nnot, "is", "are"), "not of the right format")) } } } ## Extract left hand side as given # lhs.original <- fmla[[2]] fmla.original <- fmla ## expand "." 
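    ## (Notation used in plot formulae for "fv" objects: "." stands for the
    ##  columns listed by fvnames(x, "."), ".x" for the function argument,
    ##  ".y" for the recommended estimate, and ".s" for the shading columns.
    ##  As a hypothetical illustration, if fvnames(x, ".") were
    ##  c("iso", "theo"), the formula cbind(iso, theo) ~ .x would be
    ##  equivalent to . ~ .x.)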
dotnames <- fvnames(x, ".") starnames <- fvnames(x, "*") umap <- fvexprmap(x) fmla <- eval(substitute(substitute(fom, um), list(fom=fmla, um=umap))) ## ------------------- extract data for plot --------------------- ## extract LHS and RHS of formula lhs <- fmla[[2]] rhs <- fmla[[3]] ## extract data lhsdata <- eval(lhs, envir=indata) rhsdata <- eval(rhs, envir=indata) ## reformat if(is.vector(lhsdata)) { lhsdata <- matrix(lhsdata, ncol=1) lhsvars <- all.vars(as.expression(lhs)) lhsvars <- lhsvars[lhsvars %in% names(x)] colnames(lhsdata) <- if(length(lhsvars) == 1) lhsvars else if(length(starnames) == 1 && (starnames %in% lhsvars)) starnames else paste(deparse(lhs), collapse="") } ## check lhs names exist lnames <- colnames(lhsdata) nc <- ncol(lhsdata) lnames0 <- paste("V", seq_len(nc), sep="") if(length(lnames) != nc) colnames(lhsdata) <- lnames0 else if(any(uhoh <- !nzchar(lnames))) colnames(lhsdata)[uhoh] <- lnames0[uhoh] lhs.names <- colnames(lhsdata) ## check whether each lhs column is associated with a single column of 'x' ## that is one of the alternative versions of the function. ## This may be unreliable, as it depends on the ## column names assigned to lhsdata by eval() one.star <- unlist(lapply(lhs.names, hasonlyone, amongst=fvnames(x, "*"))) one.dot <- unlist(lapply(lhs.names, hasonlyone, amongst=dotnames)) explicit.lhs.names <- ifelse(one.star, lhs.names, "") explicit.lhs.dotnames <- ifelse(one.star & one.dot, lhs.names, "") ## check rhs data if(is.matrix(rhsdata)) stop("rhs of formula should yield a vector") rhsdata <- as.numeric(rhsdata) nplots <- ncol(lhsdata) allind <- 1:nplots ## ---------- extra plots may be implied by 'shade' ----------------- extrashadevars <- NULL if(!is.null(shade)) { ## select columns by name or number names(allind) <- explicit.lhs.names shind <- try(allind[shade]) if(inherits(shind, "try-error")) stop(paste("The argument shade should be a valid subset index", "for columns of x"), call.=FALSE) if(any(nbg <- is.na(shind))) { ## columns not included in formula: add them morelhs <- try(as.matrix(indata[ , shade[nbg], drop=FALSE])) if(inherits(morelhs, "try-error")) stop(paste("The argument shade should be a valid subset index", "for columns of x"), call.=FALSE) nmore <- ncol(morelhs) extrashadevars <- colnames(morelhs) if(defaultplot && lhs.is.dot) { success <- TRUE } else if("." %in% variablesinformula(fmla.original)) { ## evaluate lhs of formula, expanding "." to shade names u <- if(length(extrashadevars) == 1) as.name(extrashadevars) else { as.call(lapply(c("cbind", extrashadevars), as.name)) } ux <- as.name(fvnames(x, ".x")) uy <- as.name(fvnames(x, ".y")) foo <- eval(substitute(substitute(fom, list(.=u, .x=ux, .y=uy)), list(fom=fmla.original))) dont.complain.about(u, ux, uy) lhsnew <- foo[[2]] morelhs <- eval(lhsnew, envir=indata) success <- identical(colnames(morelhs), extrashadevars) } else if(is.name(lhs) && as.character(lhs) %in% names(indata)) { ## lhs is the name of a single column in x ## expand the LHS explicit.lhs.names <- c(explicit.lhs.names, extrashadevars) ff <- paste("cbind", paren(paste(explicit.lhs.names, collapse=", ")), "~ 1") lhs <- lhs.of.formula(as.formula(ff)) success <- TRUE } else if(length(explicit.lhs.dotnames) > 1) { ## lhs = cbind(...) where ... 
are dotnames cbound <- paste0("cbind", paren(paste(explicit.lhs.dotnames, collapse=", "))) if(identical(deparse(lhs), cbound)) { success <- TRUE explicit.lhs.names <- union(explicit.lhs.names, extrashadevars) ff <- paste("cbind", paren(paste(explicit.lhs.names, collapse=", ")), "~ 1") lhs <- lhs.of.formula(as.formula(ff)) } else success <- FALSE } else success <- FALSE if(success) { ## add these columns to the plotting data lhsdata <- cbind(lhsdata, morelhs) shind[nbg] <- nplots + seq_len(nmore) lty <- extendifvector(lty, nplots, nmore) col <- extendifvector(col, nplots, nmore) lwd <- extendifvector(lwd, nplots, nmore) nplots <- nplots + nmore ## update the names one.star <- unlist(lapply(explicit.lhs.names, hasonlyone, amongst=fvnames(x, "*"))) one.dot <- unlist(lapply(explicit.lhs.names, hasonlyone, amongst=dotnames)) explicit.lhs.names <- ifelse(one.star, explicit.lhs.names, "") explicit.lhs.dotnames <- ifelse(one.star & one.dot, explicit.lhs.names, "") } else { ## cannot add columns warning(paste("Shade", ngettext(sum(nbg), "column", "columns"), commasep(sQuote(shade[nbg])), "were missing from the plot formula, and were omitted")) shade <- NULL extrashadevars <- NULL } } } ## -------------------- determine plotting limits ---------------------- ## restrict data to subset if desired if(!is.null(subset)) { keep <- if(is.character(subset)) { eval(parse(text=subset), envir=indata) } else eval(subset, envir=indata) lhsdata <- lhsdata[keep, , drop=FALSE] rhsdata <- rhsdata[keep] } ## determine x and y limits and clip data to these limits if(is.null(xlim) && add) { ## x limits are determined by existing plot xlim <- par("usr")[1:2] } if(!is.null(xlim)) { check.range(xlim) ok <- !is.finite(rhsdata) | (xlim[1] <= rhsdata & rhsdata <= xlim[2]) rhsdata <- rhsdata[ok] lhsdata <- lhsdata[ok, , drop=FALSE] } else { ## determine the default xlim if(rhs == fvnames(x, ".x")) { ## this is a default plot of f(r) against r if(isFALSE(clip.xlim) || is.null(alim <- attr(x, "alim"))) { ## use the full range available xlim <- range(as.vector(rhsdata), finite=TRUE) } else { ## use the recommended range by default xlim <- alim } if(xlogscale && xlim[1] <= 0) xlim[1] <- min(rhsdata[is.finite(rhsdata) & rhsdata > 0], na.rm=TRUE) ok <- !is.finite(rhsdata) | (rhsdata >= xlim[1] & rhsdata <= xlim[2]) rhsdata <- rhsdata[ok] lhsdata <- lhsdata[ok, , drop=FALSE] } else { ## this is a non-default plot ## actual range of values to be plotted if(xlogscale) { ok <- is.finite(rhsdata) & (rhsdata > 0) & matrowany(lhsdata > 0) xlim <- range(rhsdata[ok]) } else { xlim <- range(rhsdata, na.rm=TRUE) } } } if(is.null(ylim)) { yok <- is.finite(lhsdata) if(ylogscale) yok <- yok & (lhsdata > 0) ylim <- range(lhsdata[yok],na.rm=TRUE) } if(!is.null(ylim.covers)) ylim <- range(ylim, ylim.covers) ## return x, y limits only? if(limitsonly) return(list(xlim=xlim, ylim=ylim)) ## ------------- work out how to label the plot -------------------- ## extract plot labels, substituting function name labl <- fvlabels(x, expand=TRUE) ## create plot label map (key -> algebraic expression) map <- fvlabelmap(x) ## ......... label for x axis .................. if(is.null(xlab)) { argname <- fvnames(x, ".x") if(as.character(fmla)[3] == argname) { ## The x axis variable is the default function argument. ArgString <- fvlabels(x, expand=TRUE)[[argname]] xexpr <- parse(text=ArgString) ## use specified font xexpr <- fontify(xexpr, mathfont) ## Add name of unit of length? 
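      ## (If the object carries a unit of length, the annotation from
      ##  summary(unitname(x))$axis is appended below, so the x-axis label
      ##  reads, for example, "r (metres)" rather than just "r".)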
ax <- summary(unitname(x))$axis if(is.null(ax)) { xlab <- xexpr } else { xlab <- expression(VAR ~ COMMENT) xlab[[1]][[2]] <- xexpr[[1]] xlab[[1]][[3]] <- ax } } else { ## map ident to label xlab <- eval(substitute(substitute(rh, mp), list(rh=rhs, mp=map))) ## use specified font xlab <- fontify(xlab, mathfont) } } if(is.language(xlab) && !is.expression(xlab)) xlab <- as.expression(xlab) ## ......... label for y axis ................... leftside <- lhs if(ncol(lhsdata) > 1 || length(dotnames) == 1) { ## For labelling purposes only, simplify the LHS by ## replacing 'cbind(.....)' by '.' ## even if not all columns are included. leftside <- paste(as.expression(leftside)) eln <- explicit.lhs.dotnames eln <- eln[nzchar(eln)] cb <- if(length(eln) == 1) eln else { paste("cbind(", paste(eln, collapse=", "), ")", sep="") } compactleftside <- gsub(cb, ".", leftside, fixed=TRUE) ## Separately expand "." to cbind(.....) ## and ".x", ".y" to their real names dotdot <- c(dotnames, extrashadevars) cball <- if(length(dotdot) == 1) dotdot else { paste("cbind(", paste(dotdot, collapse=", "), ")", sep="") } expandleftside <- gsub(".x", fvnames(x, ".x"), leftside, fixed=TRUE) expandleftside <- gsub(".y", fvnames(x, ".y"), expandleftside, fixed=TRUE) expandleftside <- gsubdot(cball, expandleftside) ## convert back to language compactleftside <- parse(text=compactleftside)[[1]] expandleftside <- parse(text=expandleftside)[[1]] } else { compactleftside <- expandleftside <- leftside } ## construct label for y axis if(is.null(ylab)) { yl <- attr(x, "yexp") if(defaultplot && lhs.is.dot && !is.null(yl)) { ylab <- yl } else { ## replace "." and short identifiers by plot labels ylab <- eval(substitute(substitute(le, mp), list(le=compactleftside, mp=map))) } } if(is.language(ylab)) { ## use specified font ylab <- fontify(ylab, mathfont) ## ensure it's an expression if(!is.expression(ylab)) ylab <- as.expression(ylab) } ## ------------------ start plotting --------------------------- ## create new plot if(!add) do.call(plot.default, resolve.defaults(list(xlim, ylim, type="n", log=log), list(xlab=xlab, ylab=ylab), list(...), list(main=xname))) ## handle 'type' = "n" giventype <- resolve.defaults(list(...), list(type=NA))$type if(identical(giventype, "n")) return(invisible(NULL)) ## process lty, col, lwd arguments opt0 <- spatstat.options("par.fv") lty <- fixit(lty, nplots, opt0$lty, 1:nplots) col <- fixit(col, nplots, opt0$col, 1:nplots) lwd <- fixit(lwd, nplots, opt0$lwd, 1) ## convert to greyscale? 
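    ## (Setting spatstat.options(monochrome=TRUE) before plotting converts
    ##  all curve colours to grey levels, e.g. for print publication.)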
if(spatstat.options("monochrome")) col <- to.grey(col) if(!is.null(shade)) { ## shade region between critical boundaries ## extract relevant columns for shaded bands shdata <- lhsdata[, shind] if(!is.matrix(shdata) || ncol(shdata) != 2) stop("The argument shade should select two columns of x") ## truncate infinite values to plot limits if(any(isinf <- is.infinite(shdata))) { if(is.null(ylim)) { warning("Unable to truncate infinite values to the plot area") } else { shdata[isinf & (shdata == Inf)] <- ylim[2] shdata[isinf & (shdata == -Inf)] <- ylim[1] } } ## determine limits of shading shdata1 <- shdata[,1] shdata2 <- shdata[,2] ## plot grey polygon xpoly <- c(rhsdata, rev(rhsdata)) ypoly <- c(shdata1, rev(shdata2)) miss1 <- !is.finite(shdata1) miss2 <- !is.finite(shdata2) if(!any(broken <- (miss1 | miss2))) { ## single polygon clip.to.usr(xlogscale, ylogscale) polygon(xpoly, ypoly, border=shadecol, col=shadecol) } else { ## interrupted dat <- data.frame(rhsdata=rhsdata, shdata1=shdata1, shdata2=shdata2) serial <- cumsum(broken) lapply(split(dat, serial), function(z) { with(z, { xp <- c(rhsdata, rev(rhsdata)) yp <- c(shdata1, rev(shdata2)) clip.to.usr(xlogscale, ylogscale) polygon(xp, yp, border=shadecol, col=shadecol) }) }) ## save for use in placing legend okp <- !c(broken, rev(broken)) xpoly <- xpoly[okp] ypoly <- ypoly[okp] } ## overwrite graphical parameters lty[shind] <- 1 ## try to preserve the same type of colour specification if(is.character(col) && is.character(shadecol)) { ## character representations col[shind] <- shadecol } else if(is.numeric(col) && !is.na(sc <- paletteindex(shadecol))) { ## indices in colour palette col[shind] <- sc } else { ## convert colours to hexadecimal and edit relevant values col <- col2hex(col) col[shind] <- col2hex(shadecol) } ## remove these columns from further plotting allind <- allind[-shind] ## } else xpoly <- ypoly <- numeric(0) ## ----------------- plot lines ------------------------------ for(i in allind) { clip.to.usr(xlogscale, ylogscale) lines(rhsdata, lhsdata[,i], lty=lty[i], col=col[i], lwd=lwd[i]) } if(nplots == 1) return(invisible(NULL)) ## ---------------- determine legend ------------------------- key <- colnames(lhsdata) mat <- match(key, names(x)) keyok <- !is.na(mat) matok <- mat[keyok] legdesc <- rep.int("constructed variable", length(key)) legdesc[keyok] <- attr(x, "desc")[matok] leglabl <- lnames0 leglabl[keyok] <- labl[matok] ylab <- attr(x, "ylab") if(!is.null(ylab)) { if(is.language(ylab)) ylab <- flat.deparse(ylab) if(any(grepl("%s", legdesc))) legdesc <- sprintf(legdesc, ylab) } ## compute legend info legtxt <- key if(legendmath) { legtxt <- leglabl if(defaultplot && lhs.is.dot) { ## try to convert individual labels to expressions fancy <- try(parse(text=leglabl), silent=TRUE) if(!inherits(fancy, "try-error")) legtxt <- fancy } else { ## try to navigate the parse tree fancy <- try(fvlegend(x, expandleftside), silent=TRUE) if(!inherits(fancy, "try-error")) legtxt <- fancy } } if(is.expression(legtxt) || is.language(legtxt) || all(sapply(legtxt, is.language))) legtxt <- fontify(legtxt, mathfont) ## --------------- handle legend plotting ----------------------------- if(identical(legend, TRUE)) { ## legend will be plotted ## Basic parameters of legend legendxpref <- if(identical(legendpos, "float")) NULL else legendpos optparfv <- spatstat.options("par.fv")$legendargs %orifnull% list() legendspec <- resolve.defaults(legendargs, list(lty=lty, col=col, lwd=lwd), optparfv, list(x=legendxpref, legend=legtxt, inset=0.05, 
y.intersp=if(legendmath) 1.3 else 1), .StripNull=TRUE) if(!any(names(legendspec) == "bg")) { ## background colour unspecified: default is transparent if available tB <- safeDevCapabilities()$transparentBackground tBok <- (length(tB) > 0) && !anyNA(tB) && !identical(tB, "no") if(tBok) legendspec$bg <- "transparent" } if(legendavoid || identical(legendpos, "float")) { ## Automatic determination of legend position ## Assemble data for all plot objects linedata <- list() xmap <- if(xlogscale) log10 else identity ymap <- if(ylogscale) log10 else identity inv.xmap <- if(xlogscale) pow10 else identity inv.ymap <- if(ylogscale) pow10 else identity for(i in seq_along(allind)) linedata[[i]] <- list(x=xmap(rhsdata), y=ymap(lhsdata[,i])) polydata <- if(length(xpoly) > 0) list(x=xmap(xpoly), y=ymap(ypoly)) else NULL #' ensure xlim, ylim define a box boxXlim <- if(diff(xlim) > 0) xlim else par('usr')[1:2] boxYlim <- if(diff(ylim) > 0) ylim else par('usr')[3:4] #' objects <- assemble.plot.objects(xmap(boxXlim), ymap(boxYlim), lines=linedata, polygon=polydata) ## find best position to avoid them legendbest <- findbestlegendpos(objects, preference=legendpos, legendspec=legendspec) ## handle log scale if((xlogscale || ylogscale) && checkfields(legendbest, c("x", "xjust", "yjust"))) { ## back-transform x, y coordinates legendbest$x$x <- inv.xmap(legendbest$x$x) legendbest$x$y <- inv.ymap(legendbest$x$y) } } else legendbest <- list() ## ********** plot legend ************************* if(!is.null(legend) && legend) do.call(graphics::legend, resolve.defaults(legendargs, legendbest, legendspec, .StripNull=TRUE)) } ## convert labels back to character labl <- paste.expr(legtxt) labl <- gsub(" ", "", labl) ## return legend info df <- data.frame(lty=lty, col=col, key=key, label=labl, meaning=legdesc, row.names=key) return(invisible(df)) } plot.fv }) assemble.plot.objects <- function(xlim, ylim, ..., lines=NULL, polygon=NULL) { # Take data that would have been passed to the commands 'lines' and 'polygon' # and form corresponding geometrical objects. objects <- list() if(!is.null(lines)) { if(is.psp(lines)) { objects <- list(lines) } else { if(checkfields(lines, c("x", "y"))) { lines <- list(lines) } else if(!all(unlist(lapply(lines, checkfields, L=c("x", "y"))))) stop("lines should be a psp object, a list(x,y) or a list of list(x,y)") W <- owinInternalRect(xlim, ylim) for(i in seq_along(lines)) { lines.i <- lines[[i]] x.i <- lines.i$x y.i <- lines.i$y n <- length(x.i) if(length(y.i) != n) stop(paste(paste("In lines[[", i, "]]", sep=""), "the vectors x and y have unequal length")) if(!all(ok <- (is.finite(x.i) & is.finite(y.i)))) { x.i <- x.i[ok] y.i <- y.i[ok] n <- sum(ok) } segs.i <- psp(x.i[-n], y.i[-n], x.i[-1], y.i[-1], W, check=FALSE) if(!all(inside.range(range(x.i), xlim)) || !all(inside.range(range(y.i), ylim))) segs.i <- cliprect.psp(segs.i, W) objects <- append(objects, list(segs.i)) } } } if(!is.null(polygon)) { # Add filled polygon pol <- polygon[c("x", "y")] ok <- with(pol, is.finite(x) & is.finite(y)) if(!all(ok)) pol <- with(pol, list(x=x[ok], y=y[ok])) if(Area.xypolygon(pol) < 0) pol <- lapply(pol, rev) P <- try(owin(poly=pol, xrange=xlim, yrange=ylim, check=FALSE)) if(!inherits(P, "try-error")) { P <- intersect.owin(P, W) objects <- append(objects, list(P)) } } return(objects) } findbestlegendpos <- local({ ## Given a list of geometrical objects, find the best position ## to avoid them. 
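  ## Strategy used below: rescale all objects to a common bounding box,
  ## convert them to pixel masks, and compute distance transforms; unless
  ## the preferred legend position is already collision-free, the legend
  ## is placed at the location whose minimum distance to all plotted
  ## objects (and optionally to the window boundary) is greatest.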
bestlegendpos <- function(objects, show=FALSE, aspect=1, bdryok=TRUE, preference="float", verbose=FALSE, legendspec=NULL) { if(any(vacuous <- sapply(objects, is.empty))) { if(all(vacuous)) stop("All objects were empty") objects <- objects[!vacuous] } ## find bounding box W <- do.call(boundingbox, lapply(objects, as.rectangle)) ## convert to common box objects <- lapply(objects, rebound, rect=W) ## rescale x and y axes so that bounding box has aspect ratio 'aspect' aspectW <- with(W, diff(yrange)/diff(xrange)) s <- aspect/aspectW mat <- diag(c(1, s)) invmat <- diag(c(1, 1/s)) scaled.objects <- lapply(objects, affine, mat=mat) scaledW <- affine(W, mat=mat) if(verbose) { cat("Scaled space:\n") print(scaledW) } ## reinstate common box scaled.objects <- lapply(scaled.objects, rebound, rect=scaledW) ## pixellate the scaled objects pix.scal.objects <- lapply(scaled.objects, asma) ## handle very tiny or thin objects if(any(tiny <- sapply(pix.scal.objects, is.empty))) pix.scal.objects[tiny] <- lapply(scaled.objects[tiny], distmap) ## apply distance transforms in scaled space D1 <- distmap(pix.scal.objects[[1]]) Dlist <- lapply(pix.scal.objects, distmap, xy=list(x=D1$xcol, y=D1$yrow)) ## distance transform of superposition D <- im.apply(Dlist, min) if(!bdryok) { ## include distance to boundary B <- attr(D1, "bdry") D <- eval.im(pmin.int(D, B)) } if(show) { plot(affine(D, mat=invmat), add=TRUE) lapply(lapply(scaled.objects, affine, mat=invmat), plot, add=TRUE) } if(preference != "float") { ## evaluate preferred location (check for collision) if(!is.null(legendspec)) { ## pretend to plot the legend as specified legout <- do.call(graphics::legend, append(legendspec, list(plot=FALSE))) ## determine bounding box legbox <- with(legout$rect, owinInternalRect(c(left, left+w), c(top-h, top))) scaledlegbox <- affine(legbox, mat=mat) ## check for collision Dmin <- min(D[scaledlegbox]) if(Dmin >= 0.02) { ## no collision: stay at preferred location. Exit. return(list(x=preference)) } ## collision occurred! } else { ## no legend information. ## Pretend legend is 15% of plot width and height xr <- scaledW$xrange yr <- scaledW$yrange testloc <- switch(preference, topleft = c(xr[1],yr[2]), top = c(mean(xr), yr[2]), topright = c(xr[2], yr[2]), right = c(xr[2], mean(yr)), bottomright = c(xr[2], yr[1]), bottom = c(mean(xr), yr[1]), bottomleft = c(xr[1], yr[1]), left = c(xr[1], mean(yr)), center = c(mean(xr), mean(yr)), NULL) if(!is.null(testloc)) { ## look up distance value at preferred location testpat <- ppp(x=testloc[1], y=testloc[2], xr, yr, check=FALSE) val <- safelookup(D, testpat) crit <- 0.15 * min(diff(xr), diff(yr)) if(verbose) cat(paste("val=",val, ", crit=", crit, "\n")) if(val > crit) { ## no collision: stay at preferred location. Exit. return(list(x=preference)) } ## collision occurred! } } ## collision occurred! } ## find location of max locmax <- which(D$v == max(D), arr.ind=TRUE) locmax <- unname(locmax[1,]) pos <- list(x=D$xcol[locmax[2]], y=D$yrow[locmax[1]]) pos <- affinexy(pos, mat=invmat) if(show) points(pos) ## determine justification of legend relative to this point ## to avoid crossing edges of plot xrel <- (pos$x - W$xrange[1])/diff(W$xrange) yrel <- (pos$y - W$yrange[1])/diff(W$yrange) xjust <- if(xrel < 0.1) 0 else if(xrel > 0.9) 1 else 0.5 yjust <- if(yrel < 0.1) 0 else if(yrel > 0.9) 1 else 0.5 ## out <- list(x=pos, xjust=xjust, yjust=yjust) return(out) } asma <- function(z) { if(is.owin(z)) as.mask(z) else if(is.psp(z)) psp2mask(z) else NULL } callit <- function(...) 
{ rslt <- try(bestlegendpos(...)) if(!inherits(rslt, "try-error")) return(rslt) return(list()) } callit }) spatstat.explore/R/progressplots.R0000644000176200001440000002620014611073310017040 0ustar liggesusers## ## progressplots.R ## ## $Revision: 1.22 $ $Date: 2023/05/09 02:13:41 $ ## ## progress plots (envelope representations) ## dclf.progress <- function(X, ...) mctest.progress(X, ..., exponent=2) mad.progress <- function(X, ...) mctest.progress(X, ..., exponent=Inf) mctest.progress <- local({ smoothquantile <- function(z, alpha) { min(quantile(density(z), 1-alpha), max(z)) } silentmax <- function(z) { if(all(is.nan(z))) return(NaN) z <- z[is.finite(z)] if(length(z) == 0) return(NA) else return(max(z)) } mctest.progress <- function(X, fun=Lest, ..., exponent=1, nrank=1, interpolate=FALSE, alpha, rmin=0) { check.1.real(exponent) explain.ifnot(exponent >= 0) if(missing(fun) && inherits(X, "envelope")) fun <- NULL Z <- envelopeProgressData(X, fun=fun, ..., rmin=rmin, exponent=exponent) R <- Z$R devdata <- Z$devdata devsim <- Z$devsim nsim <- ncol(devsim) # determine 'alpha' and 'nrank' if(missing(alpha)) { if((nrank %% 1) != 0) stop("nrank must be an integer") alpha <- nrank/(nsim + 1) } else { check.1.real(alpha) stopifnot(alpha > 0 && alpha < 1) if(!interpolate) { if(!missing(nrank)) warning("nrank was ignored because alpha was given", call.=FALSE) nrank <- alpha * (nsim + 1) if(abs(nrank - round(nrank)) > 1e-2) stop("alpha should be a multiple of 1/(nsim + 1)", call.=FALSE) nrank <- as.integer(round(nrank)) } } alphastring <- paste(100 * alpha, "%%", sep="") # compute critical values critval <- if(interpolate) apply(devsim, 1, smoothquantile, alpha=alpha) else if(nrank == 1) apply(devsim, 1, silentmax) else apply(devsim, 1, orderstats, k=nrank, decreasing=TRUE) # create fv object fname <- if(is.infinite(exponent)) "mad" else if(exponent == 2) "T" else paste("D[",exponent,"]", sep="") ylab <- if(is.infinite(exponent)) quote(mad(R)) else if(exponent == 2) quote(T(R)) else eval(substitute(quote(D[p](R)), list(p=exponent))) df <- data.frame(R=R, obs=devdata, crit=critval, zero=0) mcname <- if(interpolate) "interpolated Monte Carlo" else "Monte Carlo" p <- fv(df, argu="R", ylab=ylab, valu="obs", fmla = . ~ R, desc = c("Interval endpoint R", "observed value of test statistic %s", paste(mcname, alphastring, "critical value for %s"), "zero"), labl=c("R", "%s(R)", "%s[crit](R)", "0"), unitname = unitname(X), fname = fname) fvnames(p, ".") <- c("obs", "crit", "zero") fvnames(p, ".s") <- c("zero", "crit") p <- hasenvelope(p, Z$envelope) # envelope may be NULL return(p) } mctest.progress }) # Do not call this function. 
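# (It is used internally by mctest.progress above and dg.progress below.)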
# Performs underlying computations envelopeProgressData <- local({ envelopeProgressData <- function(X, fun=Lest, ..., exponent=1, alternative=c("two.sided", "less", "greater"), leaveout=1, scale=NULL, clamp=FALSE, normalize=FALSE, deflate=FALSE, rmin=0, save.envelope = savefuns || savepatterns, savefuns = FALSE, savepatterns = FALSE) { alternative <- match.arg(alternative) if(!(leaveout %in% 0:2)) stop("Argument leaveout should equal 0, 1 or 2") ## compute or extract simulated functions X <- envelope(X, fun=fun, ..., alternative=alternative, savefuns=TRUE, savepatterns=savepatterns) Y <- attr(X, "simfuns") ## extract values R <- with(X, .x) obs <- with(X, .y) sim <- as.matrix(as.data.frame(Y))[, -1] nsim <- ncol(sim) ## choose function as reference has.theo <- ("theo" %in% names(X)) use.theo <- identical(attr(X, "einfo")$use.theory, TRUE) if(use.theo && !has.theo) warning("No theoretical function available; use.theory ignored") if(use.theo && has.theo) { # theo.used <- TRUE reference <- with(X, theo) leaveout <- 0 } else { # theo.used <- FALSE if(leaveout == 2) { ## use sample mean of simulations only reference <- with(X, mmean) } else { ## use sample mean of simulations *and* observed reference <- (nsim * with(X, mmean) + obs)/(nsim + 1) } } ## restrict range if(rmin > 0) { if(sum(R >= rmin) < 2) stop("rmin is too large for the available range of r values") nskip <- sum(R < rmin) } else nskip <- 0 ## determine rescaling if any if(is.null(scale)) { scaling <- NULL scr <- 1 } else if(is.function(scale)) { scaling <- scale(R) sname <- "scale(r)" ans <- check.nvector(scaling, length(R), things="values of r", fatal=FALSE, vname=sname) if(!ans) stop(attr(ans, "whinge"), call.=FALSE) if(any(bad <- (scaling <= 0))) { ## issue a warning unless this only happens at r=0 if(any(bad[R > 0])) warning(paste("Some values of", sname, "were negative or zero:", "scale was reset to 1 for these values"), call.=FALSE) scaling[bad] <- 1 } scr <- scaling } else stop("Argument scale should be a function") ## compute deviations rawdevDat <- Deviation(obs, reference, leaveout, nsim, sim[,1]) rawdevSim <- Deviation(sim, reference, leaveout, nsim) ## evaluate signed/absolute deviation relevant to alternative ddat <- RelevantDeviation(rawdevDat, alternative, clamp, scaling) dsim <- RelevantDeviation(rawdevSim, alternative, clamp, scaling) ## compute test statistics if(is.infinite(exponent)) { ## MAD devdata <- cummaxskip(ddat, nskip) devsim <- apply(dsim, 2, cummaxskip, nskip=nskip) if(deflate) { devdata <- scr * devdata devsim <- scr * devsim } testname <- "Maximum absolute deviation test" } else { dR <- c(0, diff(R)) if(clamp || (alternative == "two.sided")) { ## deviations are nonnegative devdata <- cumsumskip(dR * ddat^exponent, nskip) devsim <- apply(dR * dsim^exponent, 2, cumsumskip, nskip=nskip) } else { ## sign of deviations should be retained devdata <- cumsumskip(dR * sign(ddat) * abs(ddat)^exponent, nskip=nskip) devsim <- apply(dR * sign(dsim) * abs(dsim)^exponent, 2, cumsumskip, nskip=nskip) } if(normalize) { devdata <- devdata/R devsim <- sweep(devsim, 1, R, "/") } if(deflate) { devdata <- scr * sign(devdata) * abs(devdata)^(1/exponent) devsim <- scr * sign(devsim) * abs(devsim)^(1/exponent) } testname <- if(exponent == 2) "Diggle-Cressie-Loosmore-Ford test" else if(exponent == 1) "Integral absolute deviation test" else paste("Integrated", ordinal(exponent), "Power Deviation test") } result <- list(R=R, devdata=devdata, devsim=devsim, testname=testname, scaleR=scr, clamp=clamp) if(save.envelope) 
result$envelope <- X return(result) } cumsumskip <- function(x, nskip=0) { if(nskip == 0) cumsum(x) else c(rep(NA, nskip), cumsum(x[-seq_len(nskip)])) } cummaxskip <- function(x, nskip=0) { if(nskip == 0) cummax(x) else c(rep(NA, nskip), cummax(x[-seq_len(nskip)])) } envelopeProgressData }) dg.progress <- function(X, fun=Lest, ..., exponent=2, nsim=19, nsimsub=nsim-1, nrank=1, alpha, leaveout=1, interpolate=FALSE, rmin=0, savefuns=FALSE, savepatterns=FALSE, verbose=TRUE) { env.here <- sys.frame(sys.nframe()) if(!missing(nsimsub) && !relatively.prime(nsim, nsimsub)) stop("nsim and nsimsub must be relatively prime") ## determine 'alpha' and 'nrank' if(missing(alpha)) { if((nrank %% 1) != 0) stop("nrank must be an integer") alpha <- nrank/(nsim + 1) } else { check.1.real(alpha) stopifnot(alpha > 0 && alpha < 1) if(!interpolate) { if(!missing(nrank)) warning("nrank was ignored because alpha was given", call.=FALSE) nrank <- alpha * (nsim + 1) if(abs(nrank - round(nrank)) > 1e-2) stop("alpha should be a multiple of 1/(nsim + 1)", call.=FALSE) nrank <- as.integer(round(nrank)) } } if(verbose) cat("Computing first-level test data...") ## generate or extract simulated patterns and functions E <- envelope(X, fun=fun, ..., nsim=nsim, savepatterns=TRUE, savefuns=TRUE, verbose=FALSE, envir.simul=env.here) ## get progress data PD <- envelopeProgressData(E, fun=fun, ..., rmin=rmin, nsim=nsim, exponent=exponent, leaveout=leaveout, verbose=FALSE) ## get first level MC test significance trace T1 <- mctest.sigtrace(E, fun=fun, nsim=nsim, exponent=exponent, leaveout=leaveout, interpolate=interpolate, rmin=rmin, confint=FALSE, verbose=FALSE, ...) R <- T1$R phat <- T1$pest if(verbose) { cat("Done.\nComputing second-level data... ") state <- list() } ## second level traces simpat <- attr(E, "simpatterns") phat2 <- matrix(, length(R), nsim) for(j in seq_len(nsim)) { simj <- simpat[[j]] sigj <- mctest.sigtrace(simj, fun=fun, nsim=nsimsub, exponent=exponent, interpolate=interpolate, leaveout=leaveout, rmin=rmin, confint=FALSE, verbose=FALSE, ...) phat2[,j] <- sigj$pest if(verbose) state <- progressreport(j, nsim, state=state) } if(verbose) cat("Done.\n") ## Dao-Genton procedure dgcritrank <- 1 + rowSums(phat > phat2) dgcritrank <- pmin(dgcritrank, nsim) devsim.sort <- t(apply(PD$devsim, 1, sort, decreasing=TRUE, na.last=TRUE)) ii <- cbind(seq_along(dgcritrank), dgcritrank) devcrit <- devsim.sort[ii] devdata <- PD$devdata ## create fv object fname <- if(is.infinite(exponent)) "mad" else if(exponent == 2) "T" else paste("D[",exponent,"]", sep="") ylab <- if(is.infinite(exponent)) quote(mad(R)) else if(exponent == 2) quote(T(R)) else eval(substitute(quote(D[p](R)), list(p=exponent))) df <- data.frame(R=R, obs=devdata, crit=devcrit, zero=0) mcname <- if(interpolate) "interpolated Monte Carlo" else "Monte Carlo" p <- fv(df, argu="R", ylab=ylab, valu="obs", fmla = . 
~ R, desc = c("Interval endpoint R", "observed value of test statistic %s", paste(mcname, paste0(100 * alpha, "%%"), "critical value for %s"), "zero"), labl=c("R", "%s(R)", "%s[crit](R)", "0"), unitname = unitname(X), fname = fname) fvnames(p, ".") <- c("obs", "crit", "zero") fvnames(p, ".s") <- c("zero", "crit") if(savefuns || savepatterns) p <- hasenvelope(p, E) return(p) } spatstat.explore/R/scanstat.R0000644000176200001440000002573414611073310015745 0ustar liggesusers## ## scanstat.R ## ## Spatial scan statistics ## ## $Revision: 1.24 $ $Date: 2023/12/09 02:08:03 $ ## scanmeasure <- function(X, ...){ UseMethod("scanmeasure") } scanmeasure.ppp <- function(X, r, ..., method=c("counts", "fft")) { method <- match.arg(method) check.1.real(r) ## enclosing window R <- as.rectangle(as.owin(X)) ## determine pixel resolution M <- as.mask(R, ...) ## expand domain to include centres of all circles intersecting R W <- grow.mask(M, r) ## switch(method, counts = { ## direct calculation using C code ## get new dimensions dimyx <- W$dim xr <- W$xrange yr <- W$yrange nr <- dimyx[1] nc <- dimyx[2] ## n <- npoints(X) zz <- .C(SE_scantrans, x=as.double(X$x), y=as.double(X$y), n=as.integer(n), xmin=as.double(xr[1]), ymin=as.double(yr[1]), xmax=as.double(xr[2]), ymax=as.double(yr[2]), nr=as.integer(nr), nc=as.integer(nc), R=as.double(r), counts=as.integer(numeric(prod(dimyx))), PACKAGE="spatstat.explore") zzz <- matrix(zz$counts, nrow=dimyx[1], ncol=dimyx[2], byrow=TRUE) Z <- im(zzz, xrange=xr, yrange=yr, unitname=unitname(X)) }, fft = { ## Previous version of scanmeasure.ppp had ## Y <- pixellate(X, ..., padzero=TRUE) ## but this is liable to Gibbs phenomena. ## Instead, convolve with small Gaussian (sd = 1 pixel width) sigma <- with(W, unique(c(xstep, ystep))) Y <- density(X, ..., sigma=sigma) ## invoke scanmeasure.im Z <- scanmeasure(Y, r) Z <- eval.im(as.integer(round(Z))) }) return(Z) } scanmeasure.im <- function(X, r, ...) { D <- disc(radius=r) eps <- with(X, c(xstep,ystep)) if(any(eps >= 2 * r)) return(eval.im(X * pi * r^2)) D <- as.im(as.mask(D, eps=eps)) Z <- imcov(X, D) return(Z) } scanPoisLRTS <- function(nZ, nG, muZ, muG, alternative) { nZco <- nG - nZ muZco <- muG - muZ nlogn <- function(n, a) ifelse(n == 0, 0, n * log(n/a)) ll <- nlogn(nZ, muZ) + nlogn(nZco, muZco) - nlogn(nG, muG) criterion <- (nZ * muZco - muZ * nZco) switch(alternative, less={ ll[criterion > 0] <- 0 }, greater={ ll[criterion < 0] <- 0 }, two.sided={}) return(2 * ll) } scanBinomLRTS <- function(nZ, nG, muZ, muG, alternative) { nZco <- nG - nZ muZco <- muG - muZ nlogn <- function(n, a) ifelse(n == 0, 0, n * log(n/a)) logbin <- function(k, n) { nlogn(k, n) + nlogn(n-k, n) } ll <- logbin(nZ, muZ) + logbin(nZco, muZco) - logbin(nG, muG) criterion <- (nZ * muZco - muZ * nZco) switch(alternative, less={ ll[criterion > 0] <- 0 }, greater={ ll[criterion < 0] <- 0 }, two.sided={}) return(2 * ll) } scanLRTS <- function(X, r, ..., method=c("poisson", "binomial"), baseline=NULL, case=2, alternative=c("greater", "less", "two.sided"), saveopt = FALSE, Xmask=NULL) { stopifnot(is.ppp(X)) check.nvector(r, vname="r") if(length(r) == 0) return(as.imlist(list(), check=FALSE)) method <- match.arg(method) alternative <- match.arg(alternative) if(is.null(Xmask)) Xmask <- as.mask(as.owin(X), ...) 
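  ## The two methods below differ in the null model: "poisson" compares the
  ## count inside each scanning circle with a (possibly inhomogeneous)
  ## Poisson baseline, while "binomial" compares the number of cases with
  ## the total number of points of a bivariate case-control pattern.
  ## Hypothetical usage sketch (not run):
  ##   Z <- scanLRTS(redwood, r=seq(0.05, 0.15, by=0.05))
  ##   plot(Z)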
switch(method, poisson={ Y <- X if(is.null(baseline)) { mu <- as.im(Xmask, value=1) } else if(inherits(baseline, "ppm")) { if(!requireNamespace("spatstat.model")) stop("The package spatstat.model is required when 'baseline' is a fitted model", call.=FALSE) if(is.marked(baseline)) stop("baseline is a marked point process: not supported") mu <- predict(baseline, locations=Xmask) } else if(is.im(baseline) || is.function(baseline)) { mu <- as.im(baseline, W=Xmask) } else stop(paste("baseline should be", "a pixel image, a function, or a fitted model")) nG <- npoints(Y) }, binomial={ stopifnot(is.multitype(X)) lev <- levels(marks(X)) if(length(lev) != 2) warning("X should usually be a bivariate (2-type) point pattern") if(!is.null(baseline)) stop("baseline is not supported in the binomial case") if(is.character(case) && !(case %in% lev)) stop(paste("Unrecognised label for cases:", sQuote(case))) if(is.numeric(case) && !(case %in% seq_along(lev))) stop(paste("Undefined level:", case)) Y <- split(X)[[case]] nG <- npoints(Y) mu <- unmark(X) }) ## nr <- length(r) lrts <- vector(mode="list", length=nr) for(i in 1:nr) { ri <- r[i] nZ <- scanmeasure(Y, ri, xy=Xmask) muZ <- scanmeasure(mu, ri) if(!compatible.im(nZ, muZ)) { ha <- harmonise.im(nZ, muZ) nZ <- ha[[1]] muZ <- ha[[2]] } switch(method, poisson = { muG <- integral.im(mu) lrts[[i]] <- eval.im(scanPoisLRTS(nZ, nG, muZ, muG, alternative)) }, binomial = { muG <- npoints(mu) lrts[[i]] <- eval.im(scanBinomLRTS(nZ, nG, muZ, muG, alternative)) }) } if(length(lrts) == 1) { result <- lrts[[1]] } else { result <- im.apply(lrts, max) if(saveopt) attr(result, "iopt") <- im.apply(lrts, which.max) } return(result) } scan.test <- function(X, r, ..., method=c("poisson", "binomial"), nsim = 19, baseline=NULL, case = 2, alternative=c("greater", "less", "two.sided"), verbose=TRUE) { dataname <- short.deparse(substitute(X)) stopifnot(is.ppp(X)) method <- match.arg(method) alternative <- match.arg(alternative) stopifnot(is.numeric(r)) check.1.real(nsim) if(!(round(nsim) == nsim && nsim > 1)) stop("nsim should be an integer > 1") regionname <- paste("circles of radius", if(length(r) == 1) r else paste("between", min(r), "and", max(r))) ## ## compute observed loglikelihood function ## This also validates the arguments. obsLRTS <- scanLRTS(X=X, r=r, method=method, alternative=alternative, baseline=baseline, case=case, ..., saveopt=TRUE) obs <- max(obsLRTS) sim <- numeric(nsim) ## determine how to simulate switch(method, binomial={ methodname <- c("Spatial scan test", "Null hypothesis: constant relative risk", paste("Candidate cluster regions:", regionname), "Likelihood: binomial", paste("Monte Carlo p-value based on", nsim, "simulations")) lev <- levels(marks(X)) names(lev) <- lev casename <- lev[case] counted <- paste("points with mark", sQuote(casename), "inside cluster region") simexpr <- expression(rlabel(X)) }, poisson={ counted <- paste("points inside cluster region") X <- unmark(X) Xwin <- as.owin(X) Xmask <- as.mask(Xwin, ...) 
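           ## Choose the null model and the matching simulation expression:
           ## with no baseline the null is CSR with intensity estimated from
           ## X; otherwise simulations follow the fitted model (via rmh) or a
           ## Poisson process proportional to the supplied image/function.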
if(is.null(baseline)) { nullname <- "Complete Spatial Randomness (CSR)" lambda <- intensity(X) simexpr <- expression(runifpoispp(lambda, Xwin)) dont.complain.about(lambda) } else if(inherits(baseline, "ppm")) { if(!requireNamespace("spatstat.model")) stop("The package spatstat.model is required when 'baseline' is a fitted model", call.=FALSE) nullname <- baseline$callstring rmhstuff <- rmh(baseline, preponly=TRUE, verbose=FALSE) simexpr <- expression(rmhEngine(rmhstuff)) dont.complain.about(rmhstuff) } else if(is.im(baseline) || is.function(baseline)) { nullname <- "Poisson process with intensity proportional to baseline" base <- as.im(baseline, W=Xmask) alpha <- npoints(X)/integral.im(base) lambda <- eval.im(alpha * base) simexpr <- expression(rpoispp(lambda)) dont.complain.about(lambda) } else stop(paste("baseline should be", "a pixel image, a function, or a fitted model")) methodname <- c("Spatial scan test", paste("Null hypothesis:", nullname), paste("Candidate cluster regions:", regionname), "Likelihood: Poisson", paste("Monte Carlo p-value based on", nsim, "simulations")) }) if(verbose) { cat("Simulating...") pstate <- list() } for(i in 1:nsim) { if(verbose) pstate <- progressreport(i, nsim, state=pstate) Xsim <- eval(simexpr) simLRTS <- scanLRTS(X=Xsim, r=r, method=method, alternative=alternative, baseline=baseline, case=case, ...) sim[i] <- max(simLRTS) } pval <- mean(c(sim,obs) >= obs, na.rm=TRUE) names(obs) <- "maxLRTS" nm.alternative <- switch(alternative, greater="Excess of", less="Deficit of", two.sided="Two-sided: excess or deficit of", stop("Unknown alternative")) nm.alternative <- paste(nm.alternative, counted) result <- list(statistic = obs, p.value = pval, alternative = nm.alternative, method = methodname, data.name = dataname) class(result) <- c("scan.test", "htest") attr(result, "obsLRTS") <- obsLRTS attr(result, "X") <- X attr(result, "r") <- r return(result) } plot.scan.test <- function(x, ..., what=c("statistic", "radius"), do.window=TRUE) { xname <- short.deparse(substitute(x)) what <- match.arg(what) Z <- as.im(x, what=what) dont.complain.about(Z) do.call(plot, resolve.defaults(list(x=quote(Z)), list(...), list(main=xname))) if(do.window) { X <- attr(x, "X") plot(as.owin(X), add=TRUE, invert=TRUE) } invisible(NULL) } as.im.scan.test <- function(X, ..., what=c("statistic", "radius")) { Y <- attr(X, "obsLRTS") what <- match.arg(what) if(what == "radius") { iopt <- attr(Y, "iopt") r <- attr(X, "r") Y <- eval.im(r[iopt]) } return(as.im(Y, ...)) } spatstat.explore/R/pairs.im.R0000644000176200001440000001310314611073310015632 0ustar liggesusers# # pairs.im.R # # $Revision: 1.23 $ $Date: 2022/11/03 11:08:33 $ # pairs.listof <- pairs.solist <- function(..., plot=TRUE) { argh <- expandSpecialLists(list(...), special=c("solist", "listof")) names(argh) <- good.names(names(argh), "V", seq_along(argh)) haslines <- any(sapply(argh, inherits, what="linim")) if(haslines) { if(!requireNamespace("spatstat.linnet")) { warning(paste("the pairs() plot for images on a linear network", "requires the package 'spatstat.linnet'"), call.=FALSE) return(NULL) } do.call(spatstat.linnet::pairs.linim, append(argh, list(plot=plot))) } else { do.call(pairs.im, append(argh, list(plot=plot))) } } pairs.im <- local({ allpixelvalues <- function(Z) { as.numeric(as.matrix(Z)) } pairs.im <- function(..., plot=TRUE, drop=TRUE) { argh <- list(...) 
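    ## pairs.im collates the pixel values of several images on a common
    ## raster and plots them with pairs() as a data frame.
    ## Hypothetical usage sketch (not run):
    ##   pairs(density(cells), distmap(cells))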
cl <- match.call() ## unpack single argument which is a list of images if(length(argh) == 1) { arg1 <- argh[[1]] if(is.list(arg1) && all(unlist(lapply(arg1, is.im)))) argh <- arg1 } ## identify which arguments are images isim <- unlist(lapply(argh, is.im)) nim <- sum(isim) if(nim == 0) stop("No images provided") ## separate image arguments from others imlist <- argh[isim] rest <- argh[!isim] ## determine image names for plotting imnames <- argh$labels %orifnull% names(imlist) if(length(imnames) != nim || !all(nzchar(imnames))) { #' names not given explicitly callednames <- paste(cl)[c(FALSE, isim, FALSE)] backupnames <- paste0("V", seq_len(nim)) if(length(callednames) != nim) { callednames <- backupnames } else if(any(toolong <- (nchar(callednames) > 15))) { callednames[toolong] <- backupnames[toolong] } imnames <- good.names(imnames, good.names(callednames, backupnames)) } ## if(nim == 1) { ## one image: plot histogram Z <- imlist[[1L]] xname <- imnames[1L] do.call(hist, resolve.defaults(list(x=quote(Z), plot=plot), rest, list(xlab=xname, main=paste("Histogram of", xname)))) ## save pixel values pixvals <- list(allpixelvalues(Z)) names(pixvals) <- xname } else { ## extract pixel rasters and reconcile them imwins <- solapply(imlist, as.owin) names(imwins) <- NULL rasta <- do.call(intersect.owin, imwins) ## convert images to common raster imlist <- lapply(imlist, "[.im", i=rasta, raster=rasta, drop=FALSE) ## extract pixel values pixvals <- lapply(imlist, allpixelvalues) } ## combine into data frame pixdf <- do.call(data.frame, pixvals) ## remove NA's if(drop) pixdf <- pixdf[complete.cases(pixdf), , drop=FALSE] ## pairs plot if(plot && nim > 1) do.call(pairs, resolve.defaults(list(x=quote(pixdf)), rest, list(labels=imnames, pch="."))) labels <- resolve.defaults(rest, list(labels=imnames))$labels colnames(pixdf) <- labels class(pixdf) <- c("plotpairsim", class(pixdf)) return(invisible(pixdf)) } pairs.im }) plot.plotpairsim <- function(x, ...) { xname <- short.deparse(substitute(x)) x <- as.data.frame(x) if(ncol(x) == 1) { x <- x[,1L] do.call(hist.default, resolve.defaults(list(x=quote(x)), list(...), list(main=xname, xlab=xname))) } else { do.call(pairs.default, resolve.defaults(list(x=quote(x)), list(...), list(pch="."))) } return(invisible(NULL)) } print.plotpairsim <- function(x, ...) { cat("Object of class plotpairsim\n") cat(paste("contains pixel data for", commasep(sQuote(colnames(x))), "\n")) return(invisible(NULL)) } panel.image <- function(x, y, ..., sigma=NULL) { opa <- par(usr = c(0, 1, 0, 1)) on.exit(par(opa)) xx <- scaletointerval(x) yy <- scaletointerval(y) p <- ppp(xx, yy, window=square(1), check=FALSE) plot(density(p, sigma=sigma), add=TRUE, ...) } panel.contour <- function(x, y, ..., sigma=NULL) { opa <- par(usr = c(0, 1, 0, 1)) on.exit(par(opa)) xx <- scaletointerval(x) yy <- scaletointerval(y) p <- ppp(xx, yy, window=square(1), check=FALSE) Z <- density(p, sigma=sigma) dont.complain.about(Z) do.call(contour, resolve.defaults(list(x=quote(Z), add=TRUE), list(...), list(drawlabels=FALSE))) } panel.histogram <- function(x, ...) 
{ usr <- par("usr") opa <- par(usr = c(usr[1:2], 0, 1.5) ) on.exit(par(opa)) h <- hist(x, plot = FALSE) breaks <- h$breaks; nB <- length(breaks) y <- h$counts; y <- y/max(y) do.call(rect, resolve.defaults(list(xleft = breaks[-nB], ybottom = 0, xright = breaks[-1], ytop = y), list(...), list(col="grey"))) } ## pairwise things like correlations cov.im <- function(..., use = "everything", method = c("pearson", "kendall", "spearman")) { df <- pairs.im(..., plot=FALSE, drop=FALSE) V <- cov(df, use=use, method=method) return(V) } cor.im <- function(..., use = "everything", method = c("pearson", "kendall", "spearman")) { df <- pairs.im(..., plot=FALSE, drop=FALSE) R <- cor(df, use=use, method=method) return(R) } spatstat.explore/R/pool.R0000644000176200001440000002232714611073310015071 0ustar liggesusers#' #' pool.R #' #' pool Generic #' pool.fv #' pool.rat #' pool.fasp #' #' $Revision: 1.9 $ $Date: 2022/11/03 11:08:33 $ pool <- function(...) { UseMethod("pool") } pool.anylist <- function(x, ...) { do.call(pool, append(x, list(...))) } ## ................................................ pool.fv <- local({ Square <- function(A) { force(A); eval.fv(A^2, relabel=FALSE) } Add <- function(A,B){ force(A); force(B); eval.fv(A+B, relabel=FALSE) } Cmul <- function(A, f) { force(A); force(f); eval.fv(f * A, relabel=FALSE) } DotOnly <- function(A) { force(A); eval.fv(A, relabel=FALSE) } pool.fv <- function(..., weights=NULL, relabel=TRUE, variance=TRUE) { argh <- list(...) n <- narg <- length(argh) if(narg == 0) return(NULL) if(narg == 1) return(argh[[1]]) ## validate isfv <- unlist(lapply(argh, is.fv)) if(!all(isfv)) stop("All arguments must be fv objects") argh <- do.call(harmonise, append(argh, list(strict=TRUE))) ## make a template for the result template <- argh[[1L]] template <- eval.fv(template) ## restricts to 'dotnames' template <- vanilla.fv(template) ## remove weird attributes ## compute products if(!is.null(weights)) { check.nvector(weights, narg, things="Functions", vname="weights") Y <- Map(Cmul, argh, weights) XY <- Map(Cmul, argh, weights^2) sumX <- sum(weights) sumX2 <- sum(weights^2) } else { ## default: weights=1 Y <- XY <- argh sumX <- sumX2 <- narg } ## sum sumY <- Reduce(Add, Y) attributes(sumY) <- attributes(template) ## ratio-of-sums Ratio <- eval.fv(sumY/sumX, relabel=FALSE) if(variance) { ## variance calculation meanX <- sumX/n meanY <- eval.fv(sumY/n, relabel=FALSE) sumY2 <- Reduce(Add, lapply(Y, Square)) varX <- (sumX2 - n * meanX^2)/(n-1) varY <- eval.fv((sumY2 - n * meanY^2)/(n-1), relabel=FALSE) sumXY <- Reduce(Add, XY) covXY <- eval.fv((sumXY - n * meanX * meanY)/(n-1), relabel=FALSE) ## variance by delta method relvar <- eval.fv(pmax.int(0, varY/meanY^2 + varX/meanX^2 - 2 * covXY/(meanX * meanY)), relabel=FALSE) Variance <- eval.fv(Ratio^2 * relvar/n, relabel=FALSE) ## two sigma CI hiCI <- eval.fv(Ratio + 2 * sqrt(Variance), relabel=FALSE) loCI <- eval.fv(Ratio - 2 * sqrt(Variance), relabel=FALSE) } ## tweak labels of main estimate attributes(Ratio) <- attributes(template) if(relabel) Ratio <- prefixfv(Ratio, tagprefix="pool", descprefix="pooled ", lablprefix="") if(!variance) return(Ratio) ## tweak labels of variance terms attributes(Variance) <- attributes(template) Variance <- prefixfv(Variance, tagprefix="var", descprefix="delta-method variance estimate of ", lablprefix="bold(var)~") attributes(hiCI) <- attributes(loCI) <- attributes(template) hiCI <- prefixfv(hiCI, tagprefix="hi", descprefix="upper limit of two-sigma CI based on ", lablprefix="bold(hi)~") loCI <- 
prefixfv(loCI, tagprefix="lo", descprefix="lower limit of two-sigma CI based on ", lablprefix="bold(lo)~") ## glue together result <- Reduce(bind.fv, list(Ratio, Variance, hiCI, loCI)) ## don't plot variances, by default fvnames(result, ".") <- setdiff(fvnames(result, "."), fvnames(Variance, ".")) return(result) } pool.fv }) ## ................................................ pool.rat <- local({ Add <- function(A,B){ force(A); force(B); eval.fv(A+B, relabel=FALSE) } Square <- function(A) { force(A); eval.fv(A^2, relabel=FALSE) } Mul <- function(A,B){ force(A); force(B); eval.fv(A*B, relabel=FALSE) } pool.rat <- function(..., weights=NULL, relabel=TRUE, variance=TRUE) { argh <- list(...) n <- narg <- length(argh) if(narg == 0) return(NULL) if(narg == 1) return(argh[[1]]) ## israt <- unlist(lapply(argh, inherits, what="rat")) if(any(bad <- !israt)) { nbad <- sum(bad) stop(paste(ngettext(nbad, "Argument", "Arguments"), commasep(which(bad)), ngettext(nbad, "does not", "do not"), "contain ratio (numerator/denominator) information")) } isfv <- unlist(lapply(argh, is.fv)) if(!all(isfv)) stop("All arguments must be fv objects") ## extract template <- vanilla.fv(argh[[1]]) Y <- lapply(argh, attr, which="numerator") X <- lapply(argh, attr, which="denominator") X <- do.call(harmonise, X) Y <- do.call(harmonise, Y) templateX <- vanilla.fv(X[[1]]) templateY <- vanilla.fv(Y[[1]]) ## compute products if(!is.null(weights)) { check.nvector(weights, narg, things="Functions", vname="weights") X <- Map(Mul, X, weights) Y <- Map(Mul, Y, weights) } ## sum sumX <- Reduce(Add, X) sumY <- Reduce(Add, Y) attributes(sumX) <- attributes(templateX) attributes(sumY) <- attributes(templateY) ## ratio-of-sums Ratio <- eval.fv(sumY/sumX, relabel=FALSE) attributes(Ratio) <- attributes(template) ## variance calculation if(variance) { meanX <- eval.fv(sumX/n, relabel=FALSE) meanY <- eval.fv(sumY/n, relabel=FALSE) sumX2 <- Reduce(Add, lapply(X, Square)) sumY2 <- Reduce(Add, lapply(Y, Square)) varX <- eval.fv((sumX2 - n * meanX^2)/(n-1), relabel=FALSE) varY <- eval.fv((sumY2 - n * meanY^2)/(n-1), relabel=FALSE) XY <- Map(Mul, X, Y) sumXY <- Reduce(Add, XY) covXY <- eval.fv((sumXY - n * meanX * meanY)/(n-1), relabel=FALSE) ## variance by delta method relvar <- eval.fv(pmax.int(0, varY/meanY^2 + varX/meanX^2 - 2 * covXY/(meanX * meanY)), relabel=FALSE) Variance <- eval.fv(Ratio^2 * relvar/n, relabel=FALSE) attributes(Variance) <- attributes(template) ## two sigma CI hiCI <- eval.fv(Ratio + 2 * sqrt(Variance), relabel=FALSE) loCI <- eval.fv(Ratio - 2 * sqrt(Variance), relabel=FALSE) attributes(hiCI) <- attributes(loCI) <- attributes(template) } ## dress up if(relabel) { Ratio <- prefixfv(Ratio, tagprefix="pool", descprefix="pooled ", lablprefix="") if(variance) { Variance <- prefixfv(Variance, tagprefix="var", descprefix="delta-method variance estimate of ", lablprefix="bold(var)~") hiCI <- prefixfv(hiCI, tagprefix="hi", descprefix="upper limit of two-sigma CI based on ", lablprefix="bold(hi)~") loCI <- prefixfv(loCI, tagprefix="lo", descprefix="lower limit of two-sigma CI based on ", lablprefix="bold(lo)~") } } result <- if(!variance) Ratio else Reduce(bind.fv, list(Ratio, Variance, hiCI, loCI)) return(result) } pool.rat }) ## ........................................................... pool.fasp <- local({ pool.fasp <- function(...) { Alist <- list(...) Yname <- short.deparse(sys.call()) if(nchar(Yname) > 60) Yname <- paste(substr(Yname, 1L, 40L), "[..]") nA <- length(Alist) if(nA == 0) return(NULL) ## validate.... 
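    ## Pooling requires every function array to carry simulation envelopes
    ## at each array position; after the checks below, the envelopes at
    ## corresponding positions are combined using pool.envelope.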
## All arguments must be fasp objects notfasp <- !unlist(lapply(Alist, inherits, what="fasp")) if(any(notfasp)) { n <- sum(notfasp) why <- paste(ngettext(n, "Argument", "Arguments"), commasep(which(notfasp)), ngettext(n, "does not", "do not"), "belong to the class", dQuote("fasp")) stop(why) } ## All arguments must have envelopes notenv <- !unlist(lapply(Alist, has.env)) if(any(notenv)) { n <- sum(notenv) why <- paste(ngettext(n, "Argument", "Arguments"), commasep(which(notenv)), ngettext(n, "does not", "do not"), "contain envelope data") stop(why) } if(nA == 1L) return(Alist[[1L]]) ## All arguments must have the same dimensions witches <- lapply(Alist, getElement, name="which") witch1 <- witches[[1L]] same <- unlist(lapply(witches, identical, y=witch1)) if(!all(same)) stop("Function arrays do not have the same array dimensions") ## OK. ## Pool envelopes at each position result <- Alist[[1L]] fns <- result$fns for(k in seq_along(fns)) { funks <- lapply(Alist, extractfun, k=k) fnk <- do.call(pool.envelope, funks) attr(fnk, "einfo")$Yname <- Yname fns[[k]] <- fnk } result$fns <- fns return(result) } has.env <- function(z) { all(unlist(lapply(z$fns, inherits, what="envelope"))) } extractfun <- function(z, k) { z$fns[[k]] } pool.fasp }) spatstat.explore/R/pcfmulti.inhom.R0000644000176200001440000002134714611073310017055 0ustar liggesusers# # pcfmulti.inhom.R # # $Revision: 1.19 $ $Date: 2023/03/11 06:15:44 $ # # inhomogeneous multitype pair correlation functions # # pcfcross.inhom <- function(X, i, j, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, kernel="epanechnikov", bw=NULL, adjust.bw=1, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma=NULL, adjust.sigma=1, varcov=NULL) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] I <- (marx == i) J <- (marx == j) Iname <- paste("points with mark i =", i) Jname <- paste("points with mark j =", j) g <- pcfmulti.inhom(X, I, J, lambdaI, lambdaJ, ..., r=r,breaks=breaks, kernel=kernel, bw=bw, adjust.bw=adjust.bw, stoyan=stoyan, correction=correction, sigma=sigma, adjust.sigma=adjust.sigma, varcov=varcov, Iname=Iname, Jname=Jname) iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(g, substitute(g[inhom,i,j](r), list(i=iname,j=jname)), c("g", paste0("list", paren(paste("inhom", i, j, sep=",")))), new.yexp=substitute(g[list(inhom,i,j)](r), list(i=iname,j=jname))) attr(result, "dangerous") <- attr(g, "dangerous") return(result) } pcfdot.inhom <- function(X, i, lambdaI=NULL, lambdadot=NULL, ..., r=NULL, breaks=NULL, kernel="epanechnikov", bw=NULL, adjust.bw=1, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma=NULL, adjust.sigma=1, varcov=NULL) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. 
all points Iname <- paste("points with mark i =", i) Jname <- paste("points") g <- pcfmulti.inhom(X, I, J, lambdaI, lambdadot, ..., r=r,breaks=breaks, kernel=kernel, bw=bw, adjust.bw=adjust.bw, stoyan=stoyan, correction=correction, sigma=sigma, adjust.sigma=adjust.sigma, varcov=varcov, Iname=Iname, Jname=Jname) iname <- make.parseable(paste(i)) result <- rebadge.fv(g, substitute(g[inhom, i ~ dot](r), list(i=iname)), c("g", paste0("list(inhom,", iname, "~symbol(\"\\267\"))")), new.yexp=substitute(g[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) if(!is.null(dang <- attr(g, "dangerous"))) { dang[dang == "lambdaJ"] <- "lambdadot" dang[dang == "lambdaIJ"] <- "lambdaIdot" attr(result, "dangerous") <- dang } return(result) } pcfmulti.inhom <- function(X, I, J, lambdaI=NULL, lambdaJ=NULL, ..., lambdaX=NULL, r=NULL, breaks=NULL, kernel="epanechnikov", bw=NULL, adjust.bw=1, stoyan=0.15, correction=c("translate", "Ripley"), sigma=NULL, adjust.sigma=1, varcov=NULL, update=TRUE, leaveoneout=TRUE, Iname="points satisfying condition I", Jname="points satisfying condition J") { verifyclass(X, "ppp") # r.override <- !is.null(r) win <- X$window areaW <- area(win) npts <- npoints(X) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("translate", "Ripley") correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) # bandwidth if(is.null(bw) && kernel=="epanechnikov") { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(npts/areaW) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(npts/areaW) } hmax <- adjust.bw * hmax ########## indices I and J ######################## if(!is.logical(I) || !is.logical(J)) stop("I and J must be logical vectors") if(length(I) != npts || length(J) != npts) stop(paste("The length of I and J must equal", "the number of points in the pattern")) nI <- sum(I) nJ <- sum(J) if(nI == 0) stop(paste("There are no", Iname)) if(nJ == 0) stop(paste("There are no", Jname)) XI <- X[I] XJ <- X[J] ########## intensity values ######################### a <- resolve.lambdacross(X=X, I=I, J=J, lambdaI=lambdaI, lambdaJ=lambdaJ, lambdaX=lambdaX, ..., sigma=sigma, adjust=adjust.sigma, varcov=varcov, leaveoneout=leaveoneout, update=update, Iexplain=Iname, Jexplain=Jname) lambdaI <- a$lambdaI lambdaJ <- a$lambdaJ danger <- a$danger dangerous <- a$dangerous ########## r values ############################ # handle arguments r and breaks rmaxdefault <- rmax.rule("K", win, npts/areaW) breaks <- handle.r.b.args(r, breaks, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) fname <- c("g", "list(inhom,I,J)") out <- fv(df, "r", quote(g[inhom,I,J](r)), "theo", , alim, c("r", makefvlabel(NULL, NULL, fname, "pois")), c("distance argument r", "theoretical Poisson %s"), fname=fname, yexp=quote(g[list(inhom,I,J)](r))) ########## smoothing parameters for pcf ############################ # 
arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw, adjust=adjust.bw), list(...), list(n=length(r), from=0, to=rmax)) ################################################# # compute pairwise distances # identify close pairs of points close <- crosspairs(XI, XJ, rmax+hmax, what="ijd") # map (i,j) to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(I & J)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$d <- close$d[ok] } } # extract information for these pairs (relative to orderings of XI, XJ) dclose <- close$d icloseI <- close$i jcloseJ <- close$j # Form weight for each pair weight <- 1/(lambdaI[icloseI] * lambdaJ[jcloseJ]) ###### compute ####### if(any(correction=="translate")) { # translation correction edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE) gT <- sewpcf(dclose, edgewt * weight, denargs, areaW)$g out <- bind.fv(out, data.frame(trans=gT), makefvlabel(NULL, "hat", fname, "Trans"), "translation-corrected estimate of %s", "trans") } if(any(correction=="isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1)) gR <- sewpcf(dclose, edgewt * weight, denargs, areaW)$g out <- bind.fv(out, data.frame(iso=gR), makefvlabel(NULL, "hat", fname, "Ripley"), "isotropic-corrected estimate of %s", "iso") } # sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } # which corrections have been computed? corrxns <- rev(setdiff(names(out), "r")) # default is to display them all formula(out) <- . ~ r fvnames(out, ".") <- corrxns # unitname(out) <- unitname(X) if(danger) attr(out, "dangerous") <- dangerous return(out) } spatstat.explore/R/wtdclosepair.R0000644000176200001440000000306414611073311016616 0ustar liggesusers#' #' wtdclosepair.R #' #' $Revision: 1.1 $ $Date: 2022/05/22 10:52:22 $ weightedclosepairs <- function(X, r, correction, what=c("all", "indices", "ijd")) { what <- match.arg(what) ## return list(i,j,..,weight) for all r-close pairs switch(correction, none = , border = { cl <- closepairs(X, r, what=what) weight <- rep(1, length(cl$i)) }, isotropic = , Ripley = { if(what == "indices") { cl <- closepairs(X, r, what="ijd") weight <- edge.Ripley(X[cl$i], cl$d) cl <- cl[c("i", "j")] } else { cl <- closepairs(X, r, what=what) weight <- edge.Ripley(X[cl$i], cl$d) } }, translate = { cl <- closepairs(X, r, what="all") weight <- edge.Trans(dx = cl$dx, dy = cl$dy, W = Window(X), paired=TRUE) switch(what, indices = { cl <- cl[c("i", "j")] }, ijd = { cl <- cl[c("i", "j", "d")] }, all = { }) }, periodic = { cl <- closepairs(X, r, what=what, periodic=TRUE) weight <- rep(1, length(cl$i)) }, { warning(paste("Unrecognised correction", sQuote(correction)), call.=FALSE) return(NULL) } ) result <- append(cl, list(weight=as.numeric(weight))) return(result) } spatstat.explore/R/Gest.R0000644000176200001440000001041314611073307015021 0ustar liggesusers# # Gest.S # # Compute estimates of nearest neighbour distance distribution function G # # $Revision: 4.32 $ $Date: 2020/08/25 06:13:10 $ # ################################################################################ # "Gest" <- "nearest.neighbour" <- function(X, r=NULL, breaks=NULL, ..., correction=c("rs", "km", "han"), domain=NULL) { verifyclass(X, "ppp") if(!is.null(domain)) stopifnot(is.subset.owin(domain, Window(X))) ## W <- X$window npts <- npoints(X) lambda <- npts/area(W) ## 
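  ## Descriptive note (added annotation, not part of the original source):
  ## 'lambda' computed above is the estimated intensity of the pattern.
  ## It is used below for the theoretical Poisson benchmark
  ## G_pois(r) = 1 - exp(-lambda * pi * r^2), and (when the Kaplan-Meier
  ## estimate is requested) for the theoretical hazard 2 * pi * lambda * r.
  ##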
determine r values rmaxdefault <- rmax.rule("G", W, lambda) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max zeroes <- numeric(length(rvals)) ## choose correction(s) # correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) { correction <- c("rs", "km", "han") } else correction <- pickoption("correction", correction, c(none="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", cs="han", ChiuStoyan="han", best="km"), multi=TRUE) ## compute nearest neighbour distances nnd <- nndist(X$x, X$y) ## distance to boundary bdry <- bdist.points(X) ## restrict to subset ? if(!is.null(domain)) { ok <- inside.owin(X, w=domain) nnd <- nnd[ok] bdry <- bdry[ok] } ## observations o <- pmin.int(nnd,bdry) ## censoring indicators d <- (nnd <= bdry) ## initialise fv object df <- data.frame(r=rvals, theo=1-exp(-lambda * pi * rvals^2)) Z <- fv(df, "r", substitute(G(r), NULL), "theo", . ~ r, c(0,rmax), c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="G") if("none" %in% correction) { ## UNCORRECTED e.d.f. of nearest neighbour distances: use with care if(npts <= 1) edf <- zeroes else { hh <- hist(nnd[nnd <= rmax],breaks=breaks$val,plot=FALSE)$counts edf <- cumsum(hh)/length(nnd) } Z <- bind.fv(Z, data.frame(raw=edf), "hat(%s)[raw](r)", "uncorrected estimate of %s", "raw") } if("han" %in% correction) { if(npts <= 1) G <- zeroes else { ## uncensored distances x <- nnd[d] ## weights a <- eroded.areas(W, rvals, subset=domain) ## calculate Hanisch estimator h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts G <- cumsum(h/a) G <- G/max(G[is.finite(G)]) } ## add to fv object Z <- bind.fv(Z, data.frame(han=G), "hat(%s)[han](r)", "Hanisch estimate of %s", "han") ## modify recommended plot range attr(Z, "alim") <- range(rvals[G <= 0.9]) } if(any(correction %in% c("rs", "km"))) { ## calculate Kaplan-Meier and border correction (Reduced Sample) estimates want.rs <- "rs" %in% correction want.km <- "km" %in% correction if(npts == 0) { result <- list(rs=zeroes, km=zeroes, hazard=zeroes, theohaz=zeroes) } else { result <- km.rs.opt(o, bdry, d, breaks, KM=want.km, RS=want.rs) if(want.km) result$theohaz <- 2 * pi * lambda * rvals } wanted <- c(want.rs, rep(want.km, 3L)) wantednames <- c("rs", "km", "hazard", "theohaz")[wanted] result <- as.data.frame(result[wantednames]) ## add to fv object Z <- bind.fv(Z, result, c("hat(%s)[bord](r)", "hat(%s)[km](r)", "hat(h)[km](r)", "h[pois](r)")[wanted], c("border corrected estimate of %s", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function h(r)", "theoretical Poisson hazard function h(r)")[wanted], if(want.km) "km" else "rs") ## modify recommended plot range attr(Z, "alim") <- with(Z, range(.x[.y <= 0.9])) } nama <- names(Z) fvnames(Z, ".") <- rev(setdiff(nama, c("r", "hazard", "theohaz"))) unitname(Z) <- unitname(X) return(Z) } spatstat.explore/R/Math.fv.R0000644000176200001440000000335114611073307015425 0ustar liggesusers## ## Math.fv.R ## ## Inline arithmetic for 'fv' ## ## $Revision: 1.9 $ $Date: 2023/05/13 01:11:16 $ Math.fv <- function(x, ...){ force(x) eval(substitute(eval.fv(G(x)), list(G=as.name(.Generic), x=quote(x)))) } Complex.fv <- function(z){ force(z) eval(substitute(eval.fv(G(z)), list(G=as.name(.Generic), z=quote(z)))) } Ops.fv <- function(e1,e2=NULL) { m <- match.call() objects <- list() if(is.name(m$e1) || (is.atomic(m$e1) && length(m$e1) == 1)) { ## e1 is the name of an fv object, or is a 
single value. ## It will appear directly in the resulting function name e1use <- substitute(e1) } else { ## e1 is an expression that should first be evaluated ## It will appear as 'e1' in the resulting function name e1use <- quote(e1) objects$e1 <- eval(e1) } if(is.name(m$e2) || (is.atomic(m$e2) && length(m$e2) == 1)) { e2use <- substitute(e2) } else { e2use <- quote(e2) objects$e2 <- eval(e2) } callframe <- parent.frame() evalframe <- if(length(objects)) list2env(objects, parent=callframe) else callframe eval(substitute(eval.fv(G(e1,e2), envir=evalframe), list(G=as.name(.Generic), e1=e1use, e2=e2use))) } Summary.fv <- local({ Summary.fv <- function(..., na.rm=FALSE){ argh <- list(...) funs <- sapply(argh, is.fv) argh[funs] <- lapply(argh[funs], getValues) do.call(.Generic, c(argh, list(na.rm = na.rm))) } getValues <- function(x) { xdat <- as.matrix(as.data.frame(x)) yall <- fvnames(x, ".") vals <- xdat[, yall] return(as.vector(vals)) } Summary.fv }) spatstat.explore/R/compileK.R0000644000176200001440000001142614611073310015661 0ustar liggesusers# compileK # # Function to take a matrix of pairwise distances # and compile a 'K' function in the format required by spatstat. # # $Revision: 1.16 $ $Date: 2023/08/16 02:07:59 $ # ------------------------------------------------------------------- compileK <- function(D, r, weights=NULL, denom=1, check=TRUE, ratio=FALSE, fname="K", samplesize=denom) { # process r values breaks <- breakpts.from.r(r) rmax <- breaks$max r <- breaks$r # check that D is a symmetric matrix with nonnegative entries if(check) stopifnot(is.matrix(D) && isSymmetric(D) && all(D >= 0)) # ignore the diagonal; throw away any D values greater than rmax ok <- (D <= rmax & D > 0) Dvalues <- D[ok] # # weights? if(!is.null(weights)) { stopifnot(is.matrix(weights) && all(dim(weights)==dim(D))) wvalues <- weights[ok] } else wvalues <- NULL # count the number of D values in each interval (r[k], r[k+1L]] counts <- whist(Dvalues, breaks=breaks$val, weights=wvalues) # cumulative counts: number of D values in [0, r[k]) Kcount <- cumsum(counts) # calculate estimate Kratio <- Kcount/denom # wrap it up as an 'fv' object for use in spatstat df <- data.frame(r=r, est=Kratio) labl <- c("r", makefvlabel(NULL, "hat", fname)) K <- fv(df, "r", quote(K(r)), "est", . ~ r , c(0,rmax), labl, c("distance argument r", "estimated %s"), fname=fname) if(ratio) { if(missing(samplesize) || is.null(samplesize)) { Numer <- Kcount Denom <- denom } else { ## adjust numer/denom so that denominator is sample size Numer <- Kcount * samplesize/denom Denom <- samplesize } ## create numerator and denominator as fv objects Knum <- fv(data.frame(r=r, est=Numer), "r", quote(K(r)), "est", . ~ r , c(0,rmax), labl, c("distance argument r", "numerator of estimated %s"), fname=fname) Kden <- fv(data.frame(r=r, est=Denom), "r", quote(K(r)), "est", . ~ r , c(0,rmax), labl, c("distance argument r", "denominator of estimated %s"), fname=fname) K <- rat(K, Knum, Kden, check=FALSE) } return(K) } compilepcf <- function(D, r, weights=NULL, denom=1, check=TRUE, endcorrect=TRUE, ratio=FALSE, ..., fname="g", samplesize=denom) { # process r values breaks <- breakpts.from.r(r) if(!breaks$even) stop("compilepcf: r values must be evenly spaced", call.=FALSE) r <- breaks$r rmax <- breaks$max # check that D is a symmetric matrix with nonnegative entries if(check) stopifnot(is.matrix(D) && isSymmetric(D) && all(D >= 0)) # ignore the diagonal; throw away any D values greater than rmax ok <- (D <= rmax & D > 0) Dvalues <- D[ok] # # weights? 
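  # Descriptive note (added annotation, not part of the original source):
  # in the block below the pairwise-distance weights (a matrix matching D)
  # are rescaled to sum to 1 before being passed to density.default(), which
  # expects probability weights; the resulting kernel estimate is then
  # multiplied back by the total weight and divided by 'denom'.
  # The optional end correction further divides by a kernel-smoothed uniform
  # density on [rmin, rmax] (times the range rmax - rmin), which compensates
  # for kernel mass lost beyond the ends of the r interval.
  #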
if(!is.null(weights)) { stopifnot(is.matrix(weights) && all(dim(weights)==dim(D))) wvalues <- weights[ok] totwt <- sum(wvalues) normwvalues <- wvalues/totwt } else { nv <- length(Dvalues) normwvalues <- rep.int(1/nv, nv) totwt <- nv } # form kernel estimate rmin <- min(r) rmax <- max(r) nr <- length(r) Ddens <- do.call.matched(density.default, resolve.defaults( list(x=Dvalues, weights=normwvalues, from=rmin, to=rmax, n=nr), list(...), list(warnWbw=FALSE))) gval <- Ddens$y * totwt # normalise gval <- gval/denom # edge effect correction at r = 0 if(endcorrect) { one <- do.call.matched(density.default, resolve.defaults( list(x=seq(rmin,rmax,length=512), bw=Ddens$bw, adjust=1, from=rmin, to=rmax, n=nr), list(...), list(warnWbw=FALSE) )) onefun <- approxfun(one$x, one$y, rule=2) gval <- gval /((rmax-rmin) * onefun(Ddens$x)) } # wrap it up as an 'fv' object for use in spatstat df <- data.frame(r=r, est=gval) if(!ratio) { g <- fv(df, "r", quote(g(r)), "est", . ~ r , c(0,rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname=fname) } else { if(is.null(samplesize)) samplesize <- denom num <- data.frame(r=r, est=gval * samplesize) den <- data.frame(r=r, est=samplesize) g <- ratfv(df=NULL, numer=num, denom=den, "r", quote(g(r)), "est", . ~ r , c(0,rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname=fname) } attr(g, "bw") <- Ddens$bw return(g) } spatstat.explore/R/markmark.R0000644000176200001440000000415714611073310015726 0ustar liggesusers#' #' markmark.R #' #' Mark-mark scatterplot #' #' $Revision: 1.7 $ $Date: 2018/12/03 10:26:38 $ markmarkscatter <- function(X, rmax, ..., col=NULL, symap=NULL, transform=I, jit=FALSE) { if(!is.ppp(X) && !is.pp3(X) && !is.ppx(X)) stop("X should be a point pattern", call.=FALSE) if(npoints(X) == 0) { warning("Empty point pattern; no plot generated.", call.=FALSE) return(invisible(NULL)) } stopifnot(is.marked(X)) marx <- numeric.columns(marks(X)) nc <- ncol(marx) if(nc == 0) stop("No marks are numeric", call.=FALSE) if(nc > 1) warning("Multiple columns of numeric marks: using the first column", call.=FALSE) marx <- marx[,1,drop=TRUE] transformed <- !missing(transform) marx <- transform(marx) if(jit) marx <- jitter(marx, factor=2.5) if(is.ppp(X) || is.pp3(X)) { cl <- closepairs(X, rmax, what="ijd") } else { D <- pairdist(X) ij <- which(D <= rmax, arr.ind=TRUE) cl <- list(i=ij[,1], j=ij[,2], d=as.numeric(D[ij])) } mi <- marx[cl$i] mj <- marx[cl$j] d <- cl$d ra <- range(marx) Y <- ppp(mi, mj, ra, ra, marks=d, check=FALSE) nY <- npoints(Y) Y <- Y[order(d, decreasing=TRUE)] if(is.null(symap)) { if(is.null(col)) col <- grey(seq(0.9, 0, length.out=128)) if(nY > 0) { rd <- c(0, max(d)) symap <- symbolmap(cols=col, range=rd, size=1, pch=16) } } plot(Y, ..., symap=symap, main="", leg.side="right") axis(1) axis(2) mname <- if(jit && transformed) "Jittered, transformed mark" else if(jit) "Jittered mark" else if(transformed) "Transformed mark" else "Mark" title(xlab=paste(mname, "of first point"), ylab=paste(mname, "of second point")) if(nY >= 2) { mbar2 <- mean(marx)^2 msd2 <- sqrt(2 * var(marx)) hyperbola <- function(x) { mbar2/x } bandline1 <- function(x) { x + msd2 } bandline2 <- function(x) { x - msd2 } curve(hyperbola, from=mbar2/ra[2], to=ra[2], add=TRUE) curve(bandline1, from=ra[1], to=ra[2]-msd2, add=TRUE) curve(bandline2, from=ra[1]+msd2, to=ra[2], add=TRUE) } return(invisible(NULL)) } spatstat.explore/NEWS0000644000176200001440000006657614737444216014331 0ustar liggesusers CHANGES IN 
spatstat.explore VERSION 3.3-4 OVERVIEW o Minor improvements. SIGNIFICANT USER-VISIBLE CHANGES o reload.or.compute New argument 'exclude' specifies which objects should not be saved. CHANGES IN spatstat.explore VERSION 3.3-3 OVERVIEW o relative risk estimation using diffusion. o smoothing using diffusion. o Tweaks to bandwidth selection. NEW FUNCTIONS o relriskHeat, relriskHeat.ppp Relative risk estimation using diffusion. o blurHeat, blurHeat.im Image smoothing using diffusion. o SmoothHeat, SmoothHeat.ppp Smoothing numerical values observed at points, using diffusion. o bw.relriskHeatppp Bandwidth selection for relriskHeat.ppp SIGNIFICANT USER-VISIBLE CHANGES o bw.ppl Argument 'shortcut' now defaults to TRUE. CHANGES IN spatstat.explore VERSION 3.3-2 OVERVIEW o Tweaks to documentation. o Internal repairs. o Internal changes to satisfy package checker. CHANGES IN spatstat.explore VERSION 3.3-1 OVERVIEW o Internal changes to satisfy package checker. CHANGES IN spatstat.explore VERSION 3.3-0 OVERVIEW o 'spatstat.explore' now depends on package 'spatstat.univar'. o Some functions have been moved to 'spatstat.univar'. o Minor improvements. PACKAGE DEPENDENCE o spatstat.explore now depends on the new package 'spatstat.univar' o Some functions have been moved from 'spatstat.explore' to 'spatstat.univar'. SIGNIFICANT USER-VISIBLE CHANGES o cbind.fv, bind.fv Additional arguments may be functions in the R language. DELETED FUNCTIONS o bw.abram The generic 'bw.abram' has been moved to the new package 'spatstat.univar'. o CDF, CDF.density The generic 'CDF' and method 'CDF.density' have been moved to the new package 'spatstat.univar'. o densityAdaptiveKernel The generic 'densityAdaptiveKernel' has been moved to the new package 'spatstat.univar'. o dkernel, pkernel, qkernel, rkernel These functions have been moved to the new package 'spatstat.univar'. o kernel.factor, kernel.moment, kernel.squint These functions have been moved to the new package 'spatstat.univar'. o kaplan.meier, reduced.sample, km.rs These functions have been moved to the new package 'spatstat.univar'. o quantile.density This method has been moved to the new package 'spatstat.univar'. o stieltjes This function has been moved to the new package 'spatstat.univar'. CHANGES IN spatstat.explore VERSION 3.2-7 OVERVIEW o Bug fixes. o Internal improvements. BUG FIXES o SpatialMedian.ppp, SpatialQuantile.ppp Argument `sigma' was ignored in some calculations. Fixed. CHANGES IN spatstat.explore VERSION 3.2-6 OVERVIEW o We thank Mohomed Abraj, Marcelino de la Cruz and Stephanie Hogg for contributions. o Spatially weighted median and quantile of mark values. o Adaptive estimation of intensity for split point patterns. o Anisotropic bandwidth selection o Boyce index. o Internal improvements. o Bug fixes. NEW FUNCTIONS o SpatialMedian.ppp, SpatialQuantile.ppp Spatially weighted median and quantile of mark values of a point pattern. o boyce Boyce index and continuous Boyce index. o densityAdaptiveKernel.splitppp A method for 'densityAdaptiveKernel' for split point patterns. SIGNIFICANT USER-VISIBLE CHANGES o bw.ppl New argument `varcov1' for anisotropic bandwidth selection. o bw.smoothppp New argument `varcov1' for anisotropic bandwidth selection. BUG FIXES o studpermu.test The code required each group to consist of at least 3 point patterns, rather than 2 point patterns (as stated in the documentation). Fixed. o Jest Ignored pixel resolution argument 'eps'. Fixed. 
o scanLRTS Pixel resolution arguments 'dimyx', 'eps', 'xy' were not correctly handled. Fixed. CHANGES IN spatstat.explore VERSION 3.2-5 OVERVIEW o Minor corrections in documentation. CHANGES IN spatstat.explore VERSION 3.2-4 OVERVIEW o Integration of functions. o Changed defaults for Clark-Evans Test. o spatstat.explore no longer suggests package 'maptools'. o Minor improvements and bug fixes. NEW FUNCTIONS o integral.fv Compute the integral of a function. SIGNIFICANT USER-VISIBLE CHANGES o density.ppp New argument 'sameas'. o clarkevans.test The asymptotic test is now available for any choice of edge correction. o clarkevans.test New argument 'method' determines whether to use the asymptotic test or Monte Carlo test. The default has changed to method='asymptotic'. o clarkevans.test Default edge correction has changed, to avoid bias. BUG FIXES o kernel.squint The return value was incorrect if the argument 'bw' was given. Fixed. o pcf The variance approximation was calculated incorrectly (due to the bug in kernel.squint). Fixed. o quantile.density Crashed if `probs` contained NA values and `names=FALSE`. Fixed. o bw.smoothppp Crashed if every point in 'X' was duplicated. Fixed. CHANGES IN spatstat.explore VERSION 3.2-3 OVERVIEW o Improvements to envelope methods. o Suppress warning messages from density.default. o Minor improvements and bug fixes. NEW FUNCTIONS o compileCDF Low level utility for calculating cumulative distribution function of distance variable. SIGNIFICANT USER-VISIBLE CHANGES o circdensity Improved output of 'print' method o envelope All methods for `envelope' now accept a summary function in which the function argument is not named 'r'. This includes functions such as `transect.im' and `roc'. o plot.bermantest Improved layout for plots of Berman's Z2 test. o plot.fv New argument 'clip.xlim'. o circdensity Suppress annoying warning messages from density.default. o compileK, compilepcf Suppress annoying warning messages from density.default. o rhohat Suppress annoying warning messages from density.default. BUG FIXES o envelope methods Results were malformed if the name of the function argument was not "r". Fixed. CHANGES IN spatstat.explore VERSION 3.2-1 OVERVIEW o Internal bug fixes. CHANGES IN spatstat.explore VERSION 3.2-0 OVERVIEW o We thank Jonatan Gonzalez for contributions. o Changed the calculation of standard errors in density.ppp and relrisk.ppp. o Inline arithmetic for function tables ('fv') and arrays ('fasp') o Standard error calculation for Smooth.ppp (experimental) o multitype pair correlation functions can save numerator and denominator. o multitype inhomogeneous J functions. o More support for automatic bandwidth selection. o Bug fixes in calculation of standard errors. NEW FUNCTIONS o Math.fv, Complex.fv, Summary.fv, Ops.fv Methods for arithmetic operations for function tables (class 'fv') o Math.fasp, Complex.fasp, Summary.fasp, Ops.fasp Methods for arithmetic operations for function arrays (class 'fasp') o Gcross.inhom, Gdot.inhom Multitype G functions for inhomogeneous point processes. o Jcross.inhom, Jdot.inhom, Jmulti.inhom Multitype J functions for inhomogeneous point processes. o summary.bw.optim, print.summary.bw.optim Method for 'summary' of optimised bandwidth objects (class 'bw.optim'). These are the objects produced by the bandwidth selection functions such as bw.diggle, bw.scott, bw.pcf SIGNIFICANT USER-VISIBLE CHANGES o density.ppp Standard error calculation is now available with any smoothing kernel. 
o density.ppp The interpretation of 'weights' in the calculation of standard error has changed. New argument 'wtype' controls this interpretation. o relrisk.ppp The interpretation of 'weights' in the calculation of standard error has changed. New argument 'wtype' controls this interpretation. o relrisk.ppp New argument 'fudge' specifies a constant numeric value that will be added to each estimate of point process intensity before calculation of relative risk. o Smooth.ppp Standard error calculation is now supported (Experimental). o pcfcross, pcfdot, pcfmulti New argument 'ratio' makes it possible to save the numerator and denominator of the function estimates, so that estimates can be pooled. o bw.relrisk.ppp Additional arguments '...' are now passed to 'density.ppp'. o eval.fasp Automatically-generated labels have been improved. o relrisk.ppp Issues a warning if numerical underflow is detected. o rhohat.ppp, rhohat.quad New argument 'rule.eps' passed to 'as.mask'. BUG FIXES o density.ppp Calculation of standard error was slightly incorrect if edge=TRUE. Fixed. o relrisk.ppp Calculation of standard error was incorrect for non-Gaussian kernels. Fixed. CHANGES IN spatstat.explore VERSION 3.1-0 OVERVIEW o Pair correlation functions allow more control over smoothing parameters. o Improved support for one-dimensional smoothing kernels. o Bug fixes in plot.fv. o Internal improvements and bug fixes. SIGNIFICANT USER-VISIBLE CHANGES o kernel.moment New arguments 'mean' and 'sd'. Computation accelerated for kernel='cosine' or 'optcosine'. All cases are now computed using analytic expressions, for m <= 2. o bw.abram This function is now generic, with a method for class 'ppp'. o pcfinhom, pcfdot.inhom, pcfcross.inhom New arguments 'adjust.sigma' and 'adjust.bw' allow separate adjustment of the one-dimensional smoothing bandwidth 'bw' and the spatial smoothing bandwidth 'sigma'. BUG FIXES o plot.fv When the argument 'log' was given, the plotted curves were incorrectly clipped, or were missing altogether. Fixed. o plot.fv If 'add=TRUE', and if the existing plot was created using logarithmic axes, the logarithmic scale was ignored. Fixed. o plot.fv Sometimes gave an obscure warning about 'rebound.owin', when 'xlim' or 'ylim' was given. Fixed. CHANGES IN spatstat.explore VERSION 3.0-6 OVERVIEW o Internal improvements and bug fixes. CHANGES IN spatstat.explore VERSION 3.0-5 OVERVIEW o Bug fix in pcf. o We thank Maximilian Hesselbarth for contributions. BUG FIXES o pcf.ppp Estimates were incorrectly scaled (they were incorrectly multiplied by the area of the window.) Spotted by Maximilian Hesselbarth. Bug introduced in spatstat.explore 3.0-0. Fixed. CHANGES IN spatstat.explore VERSION 3.0-4 OVERVIEW o Bug fix in Kest in a very special case. o We thank 'Marjolein9' for contributions. BUG FIXES o Kest Isotropic edge correction weight was computed incorrectly for a data point lying exactly on a corner of a rectangular window. Spotted by GitHub contributor 'Marjolein9'. Fixed. CHANGES IN spatstat.explore VERSION 3.0-3 OVERVIEW o Tweaks to placate package checker. CHANGES IN spatstat.explore VERSION 3.0-2 OVERVIEW o Tweaks to placate package checker. CHANGES IN spatstat.explore VERSION 3.0-1 OVERVIEW o Tweaks to placate package checker. CHANGES IN spatstat.explore VERSION 3.0-0 OVERVIEW o New package o We thank Marie-Colette van Lieshout and Daniel Manrique-Castano for contributions. o Periodic edge correction for K function. o Changed denominator in K function and pair correlation function. 
o Bandwidth selection for adaptive kernel estimation of intensity. o U-shaped curves in 'rhohat'. o Radial cumulative integral of an image. o Minor improvements. NEW FUNCTIONS o bw.CvL.adaptive Bandwidth selection for adaptive kernel estimation of intensity. o radcumint Radial cumulative integral of an image. SIGNIFICANT USER-VISIBLE CHANGES o Package structure The package 'spatstat.core' has been split into two packages called 'spatstat.explore' (for exploratory data analysis) and 'spatstat.model' (for modelling and formal inference). o spatstat.explore The new package 'spatstat.explore' contains the code for exploratory data analysis and nonparametric analysis of spatial data. Examples include 'density.ppp', 'Kest', 'envelope', 'rhohat', 'clarkevans.test'. o NEWS The NEWS file for the new package 'spatstat.explore' contains older news items from the defunct package 'spatstat.core' (for functions which are now in 'spatstat.explore'). o Kest, Kdot, Kcross, Ldot, Lcross, Kmulti These functions now accept the option 'correction="periodic"' to compute the periodic (toroidal) edge correction estimate. o Kest, pcf, Ksector, Kdot, Kcross, Kmulti When ratio=TRUE, the denominator is now equal to the number of pairs of points considered. This does not affect the estimate of the summary function, but it changes the calculation of pooled estimates when the estimates were obtained from different sized windows. o markcorr, markcrosscorr These functions now allow negative mark values when normalise=FALSE. o marktable This function now works for point patterns in three dimensions (class 'pp3') and point patterns on a network (class 'lpp'). o bw.relrisk This function is now generic, with a method for class 'ppp' o compileK, compilepcf These functions have a new argument 'samplesize'. If 'ratio=TRUE' the numerator and denominator will be rescaled by a common factor so that the denominator is equal to 'samplesize'. o adaptive.density Now accepts 'method="nearest"' and passes the data to 'nndensity'. o rhohat.ppp New options 'smoother="mountain"' and 'smoother="valley"' for estimating a unimodal function (U-shaped curve). o rhohat.ppp If the covariate is a 'distfun', the name of the unit of length is saved and displayed on the plot. o rhohat.ppp New arguments 'jitter', 'jitterfactor', 'interpolate' allow greater control over the calculation. o rhohat.ppp New argument 'do.CI' specifies whether to calculate confidence bands. CHANGES IN spatstat.core VERSION 2.4-4.010 OVERVIEW o Internal improvements. CHANGES IN spatstat.core VERSION 2.4-4 OVERVIEW o Bug fixes and minor improvements. BUG FIXES o rhohat.ppp The argument 'subset' was not handled correctly in the internal data. The estimated function 'rho' was correct, but if 'predict.rhohat' was applied, predictions were computed only in the 'subset', and were possibly incorrect values. Fixed. o Gfox, Jfox Warnings were issued about text formatting errors (mentioning 'sprintf' or 'fmt'). Fixed. CHANGES IN spatstat.core VERSION 2.4-3 OVERVIEW o We thank Art Stock for contributions. o Bug fixes and minor improvements. BUG FIXES o Smooth.ppp Crashed when 'kernel' was a function, 'at="points"' and 'scalekernel=FALSE'. Fixed. o Finhom, Ginhom, Jinhom Crashed when ratio=TRUE. [Spotted by Art Stock.] Fixed. o envelope Crashed for some of the summary functions when ratio=TRUE. [Spotted by Art Stock.] Fixed. o "[.rat" Crashed in some cases. Fixed. o Kcross The result of Kcross() was 'invisible', i.e. it was not automatically printed. Fixed. 
CHANGES IN spatstat.core VERSION 2.4-2 OVERVIEW o Internal bug fixes. CHANGES IN spatstat.core VERSION 2.4-1 OVERVIEW o We thank Frederic Lavancier, Sebastian Meyer, Suman Rakshit and Sven Wagner for contributions. o Improved approximation of intensity of Gibbs models. o Experimental code to represent (theoretical) point process models o Extract more information about a point process model. o Internal improvements and bug fixes. SIGNIFICANT USER-VISIBLE CHANGES o relrisk.ppp Ratios which are close to 0/0 are handled more effectively, reducing the likelihood of strange-looking plots when 'sigma' is very small. BUG FIXES o density.ppp Crashed if the observation window had zero area. Fixed. o dirichletVoronoi.ppp Crashed randomly, with obscure error messages from 'im' or 'eval.im', when argument 'f' had a small value. [Spotted by Suman Rakshit.] Fixed. o dirichletVoronoi.ppp Rarely, produced an image containing NA values. [Spotted by Suman Rakshit.] Fixed. o vcov.ppm Crashed in some cases, with message 'object lamdel not found'. [Spotted by Sven Wagner.] Fixed. CHANGES IN spatstat.core VERSION 2.4-0 OVERVIEW o We thank Sriram Ramamurthy for contributions. o spatstat.core now depends on the new package 'spatstat.random'. o Functions for generating random patterns have been removed. o Minor improvements and bug fixes SIGNIFICANT USER-VISIBLE CHANGES o package structure The code for generating random spatial patterns (including 'rpoispp', 'rMatClust', 'rThomas', 'rNeymanScott', 'rStrauss', 'rmh') has been removed from 'spatstat.core' and placed in a new package 'spatstat.random'. This new package is required by 'spatstat.core'. o reload.or.compute New argument 'context' BUG FIXES o reload.or.compute Scoping error (crashed sometimes if called from a non-global environment). Fixed. CHANGES IN spatstat.core VERSION 2.3-2 OVERVIEW o Minor improvements and bug fixes. o We thank Jonas Brehmer for contributions. SIGNIFICANT USER-VISIBLE CHANGES o pcf Improved error message BUG FIXES o edge.Ripley Results were incorrect for data points lying exactly at the corners of a rectangle. Fixed. CHANGES IN spatstat.core VERSION 2.3-1 OVERVIEW o Covariates in ppm and mppm may be functions that depend on the marks as well as the spatial coordinates. o Automatic selection of threshold for defining a binary predictor. o Random perturbation of line segments. o Minor extensions, performance improvements, and bug fixes. NEW FUNCTIONS o thresholdSelect, thresholdCI Select the optimal threshold for converting a numerical predictor to a binary predictor. o coef<-.fii Changes the coefficients of a fitted interaction object (a method for the generic "coef<-") SIGNIFICANT USER-VISIBLE CHANGES o distcdf Improved regularisation algorithm. Argument 'nr=NULL' is now accepted. New argument 'delta' allows the result to be interpolated onto a finer grid. o collapse.fv Columns identified by the arguments 'same' and 'different' may now be absent from some of the 'fv' objects that will be collapsed. o Kest When the argument 'domain' is given, the calculation of estimates of K(r) has changed slightly, to adhere more closely to the description in the help file. o reload.or.compute Now prints a message indicating whether the data were recomputed or reloaded from file. New argument 'verbose'. o pool.envelope Now uses the value of 'nrank' which was used in the original envelopes. o Kmulti New argument 'rmax'. o Kinhom No longer issues a warning about changed behaviour in the case where 'lambda' is a fitted model. 
o pcfinhom No longer issues a warning about changed behaviour in the case where 'lambda' is a fitted model. BUG FIXES o segregation.test The test statistic was calculated as the mean, rather than the sum, of discrepancies between probabilities. (The p-value was not affected.) Fixed. o Kest If 'domain' was specified, 'rmax' was ignored. Fixed. o edge.Ripley Value was incorrect for a point lying exactly on a corner. Fixed. o edge.Ripley Crashed when method="interpreted", if a point lay exactly on a corner. Fixed. o plot.fv, plot.envelope Crashed when trying to display a significance band of width zero around a constant function. Fixed. o collapse.fv Crashed if 'length(same) > 1'. Fixed. CHANGES IN spatstat.core VERSION 2.3-0 OVERVIEW o Transect of an image along a curve. o Image cross-correlation and cross-covariance. o Minor bug fixes. NEW FUNCTIONS o cov.im, cor.im Correlation or covariance between several pixel images. SIGNIFICANT USER-VISIBLE CHANGES o transect.im New argument 'curve' allows the user to specify a curved transect. BUG FIXES o rhohat The rug plot (produced by plot.rhohat) was incorrect when rhohat was called with method="piecewise". Fixed. o markcrosscorr Did not recognise the option 'correction="none"'. Fixed. o roc.ppp The default plot of the result of roc.ppp did not include the diagonal line 'y=x'. Fixed. CHANGES IN spatstat.core VERSION 2.2-0 OVERVIEW o We thank Abdollah Jalilian, Yongtao Guan and Rasmus Waagepetersen for contributions. o estimation of the spatial covariance function of a pixel image o simulation of the product shot noise Cox process. o extensions to rhohat NEW FUNCTIONS o rPSNCP Generate simulated realisations of the product shot noise Cox process. Contributed by Abdollah Jalilian, Yongtao Guan and Rasmus Waagepetersen. o spatcov Estimate the spatial covariance function of a pixel image. o pairMean Compute the mean of a specified function of interpoint distance between random points in a window. SIGNIFICANT USER-VISIBLE CHANGES o rhohat New option (smoother='piecewise') computes a piecewise-constant estimate of rho(z). o rhohat The result now includes the 'average' intensity rho. o distcdf Arguments which are NULL will be treated as missing. o distcdf New argument 'savedenom'. CHANGES IN spatstat.core VERSION 2.1-2 OVERVIEW o Reduced CRAN check time. CHANGES IN spatstat.core VERSION 2.1-1 OVERVIEW o Minor bug fix CHANGES IN spatstat.core VERSION 2.1-0 OVERVIEW o densityfun.ppp handles query points outside original window o Minor improvements and bug fixes. SIGNIFICANT USER-VISIBLE CHANGES o densityfun.ppp The resulting function can now handle query points which lie outside the window of the original data, and has argument 'drop=TRUE' which specifies how to handle them. o rpoint New argument 'forcewin' forces the code to use the window 'win' when 'f' is a pixel image. BUG FIXES o cdf.test Crashed if the covariate was constant. Fixed. CHANGES IN spatstat.core VERSION 2.0-0 OVERVIEW o We thank Tilman Davies, Greg McSwiggan and Suman Rakshit for contributions. o We thank Corey Anderson, Michael Chirico, Andy Craig, Marcelino de la Cruz, Tilman Davies, Pavel Fibich, Kurt Hornik, Gopalan Nair, Yonatan Rosen and Rasmus Waagepetersen for contributions. o Diffusion kernel smoothing. o More support for spatial logistic regression models. o predict.mppm now works for multitype point process models. o Improved handling of 'newdata' in predict.mppm. o More support for multi-dimensional patterns. 
NEW FUNCTIONS o densityHeat New generic function for diffusion kernel estimation of intensity o densityHeat.ppp Diffusion kernel estimation of intensity for point pattern in 2 dimensions. This is an alternative to density.ppp. o intersect.boxx Compute intersection of boxes in multi-dimensional space o scale.boxx, scale.ppx Methods for 'scale' for boxes and patterns in multi-dimensional space o shift.boxx, shift.ppx Methods for 'shift' for boxes and patterns in multi-dimensional space o is.boxx Determine whether an object is a multidimensional box SIGNIFICANT USER-VISIBLE CHANGES o rotmean The result now has the same 'unitname' as the input object X. New argument 'adjust' controls the smoothing bandwidth. o rlabel New argument 'group' specifies that the points are divided into several groups, and that relabelling is applied within each group. o Kcross, Gcross, Jcross Function labels (shown on the plot legend) have been improved when i = j. o anova.mppm Issues a warning when applied to random-effects models (models fitted using the argument 'random'). BUG FIXES o Gest If correction="rs" or correction="km", then both the reduced-sample (border correction) and Kaplan-Meier corrected estimates were calculated. [Spotted by Gopalan Nair.] Fixed. o simulate.rhohat Crashed when applied to rhohat objects computed from data on a linear network. Fixed. CHANGES IN spatstat.core VERSION 1.65-11 OVERVIEW o Internal tweaks. CHANGES IN spatstat.core VERSION 1.65-10 OVERVIEW o Minor corrections to documentation. CHANGES IN spatstat.core VERSION 1.65-9 OVERVIEW o We thank Ian Buller for a suggestion. o weights permitted in density calculation for line segments. SIGNIFICANT USER-VISIBLE CHANGES o density.psp New argument 'weights'. CHANGES IN spatstat.core VERSION 1.65-8 OVERVIEW o Minor changes to appease the compiler. CHANGES IN spatstat.core VERSION 1.65-7 OVERVIEW o We thank Michael Chirico for a contribution. o Minor changes to appease the compiler. CHANGES IN spatstat.core VERSION 1.65-6 OVERVIEW o We thank Tilman Davies and Pavel Fibich for contributions. o Increased speed for large datasets. SIGNIFICANT USER-VISIBLE CHANGES o rSSI Accelerated. o overall speed Changes have been made to the internal code of spatstat which should accelerate computations involving large datasets. o localpcf, localpcfinhom New argument 'rvalue'. BUG FIXES o rLGCP Simulation results for log-Gaussian Cox processes were incorrect unless the pixel dimensions and pixel spacings were identical on the horizontal and vertical axes. (If pixel dimensions were not specified, then the results were incorrect whenever the Frame of the simulation window was not a square.) [Spotted by Tilman Davies.] Fixed. o Vmark Crashed if normalise=TRUE when there was only one column of marks. (Spotted by Pavel Fibich.) Fixed. o nnclean Crashed if k >= npoints(X). Fixed. CHANGES IN spatstat.core VERSION 1.65-5 OVERVIEW o Minor changes required by CRAN. CHANGES IN spatstat.core VERSION 1.65-1 OVERVIEW o Added NEWS file. CHANGES IN spatstat.core VERSION 1.65-0 OVERVIEW o Package initialised at version 1.65-0 SIGNIFICANT USER-VISIBLE CHANGES o spatstat.core The package 'spatstat.core' has been created from a subset of the code in the original 'spatstat' package version 1.65-0. It contains the core functionality for statistical analysis of spatial data. 
For an overview, see help("spatstat.core-package") o Execution The 'spatstat.core' package is slightly faster than the corresponding code in the 'spatstat' package, because the procedure for calling internal C functions has been streamlined. spatstat.explore/src/0000755000176200001440000000000014611073330014357 5ustar liggesusersspatstat.explore/src/g3.c0000755000176200001440000001254314611073311015043 0ustar liggesusers#include #include #include "geom3.h" #include "functable.h" /* $Revision: 1.4 $ $Date: 2022/10/21 10:43:01 $ G function (nearest neighbour distribution) of 3D point pattern Let b = distance from point p[i] to boundary of box d = distance from p[i] to nearest p[j] method = 1 naive ratio estimator (Ripley 1981) numerator(r) = count(i: b >= r, d <= r) denominator(r) = count(i: b >= r) method = 2 minus sampling estimator numerator(r) = count(i: b >= r, d <= r) denominator(r) = lambda * volume(x: b >= r) where lambda = (no of points)/volume(box) method = 3 Hanisch's G3 numerator(r) = count(i: b >= d, d <= r) denominator(r) = count(i: b >= d) method = 4 Hanisch's G4 numerator(r) = count(i: b >= d, d <= r) denominator(r) = fudge * volume(x: b >= r) fudge = numerator(R)/denominator(R) R = sup{r : denominator(r) > 0 } # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009, 2012, 2022. # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ #define MIN(X,Y) (((X) > (Y)) ? 
(Y) : (X)) double * nndist3( /* compute nearest neighbour distance for each p[i] */ Point *p, int n, Box *b ) { register int i, j; register double dx, dy, dz, dist2, nearest2, huge2; Point *ip, *jp; double *nnd; nnd = (double *) R_alloc(n, sizeof(double)); dx = b->x1 - b->x0; dy = b->y1 - b->y0; dz = b->z1 - b->z0; huge2 = 2.0 * (dx * dx + dy * dy + dz * dz); /* scan each point and find closest */ for( i = 0; i < n; i++) { ip = p + i; nearest2 = huge2; for(j = 0; j < n; j++) if(j != i) { jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist2 = dx * dx + dy * dy + dz * dz; if(dist2 < nearest2) nearest2 = dist2; } nnd[i] = sqrt(nearest2); } return(nnd); } double * border3( /* compute distances to border */ Point *p, int n, Box *b ) { register int i; register double bord; register Point *ip; double *bored; bored = (double *) R_alloc(n, sizeof(double)); for( i = 0; i < n; i++) { ip = p + i; bord = MIN(ip->x - b->x0, b->x1 - ip->x); bord = MIN(bord, ip->y - b->y0); bord = MIN(bord, b->y1 - ip->y); bord = MIN(bord, ip->z - b->z0); bord = MIN(bord, b->z1 - ip->z); bored[i] = bord; } return(bored); } void g3one( Point *p, int n, Box *b, Ftable *g ) { register int i, l, lbord, lnnd; double dt; double *bord, *nnd; bord = border3(p, n, b); nnd = nndist3(p, n, b); /* initialise */ for(l = 0; l < g->n; l++) (g->num)[l] = (g->denom)[l] = 0.0; /* spacing of argument in result vector g */ dt = (g->t1 - g->t0)/(g->n - 1); for(i = 0; i < n; i++) { lbord = floor( (bord[i] - g->t0) / dt ); if(lbord >= g->n) lbord = g->n - 1; for(l = 0; l <= lbord; l++) (g->denom)[l] += 1.0; lnnd = ceil( (nnd[i] - g->t0) / dt ); if(lnnd < 0) lnnd = 0; for(l = lnnd; l <= lbord; l++) (g->num)[l] += 1.0; } /* compute ratio */ for(l = 0; l < g->n; l++) (g->f)[l] = ((g->denom)[l] > 0)? (g->num)[l] / (g->denom)[l] : 1.0; } void g3three( Point *p, int n, Box *b, Ftable *g ) { register int i, l, lmin; double dt; int denom; double *bord, *nnd; bord = border3(p, n, b); nnd = nndist3(p, n, b); /* initialise */ denom = 0; for(l = 0; l < g->n; l++) (g->num)[l] = 0.0; /* spacing of argument in result vector g */ dt = (g->t1 - g->t0)/(g->n - 1); for(i = 0; i < n; i++) { if(nnd[i] <= bord[i]) { ++denom; lmin = ceil( (nnd[i] - g->t0) / dt ); if(lmin < 0) lmin = 0; for(l = lmin; l < g->n; l++) (g->num)[l] += 1.0; } } /* compute ratio */ for(l = 0; l < g->n; l++) { (g->denom)[l] = denom; (g->f)[l] = (denom > 0)? 
(g->num)[l] / (double) denom : 1.0; } } void g3cen( Point *p, int n, Box *b, H4table *count ) { register int i, lcen, lobs; register double dt, cens, obsv; double *bord, *nnd; bord = border3(p, n, b); nnd = nndist3(p, n, b); /* spacing of histogram cells */ dt = (count->t1 - count->t0)/(count->n - 1); /* 'count' is assumed to have been initialised */ for(i = 0; i < n; i++) { obsv = nnd[i]; cens = bord[i]; lobs = ceil( (obsv - count->t0) / dt ); lcen = floor( (cens - count->t0) / dt ); if(obsv <= cens) { /* observation is uncensored; increment all four histograms */ if(lobs >= count->n) ++(count->upperobs); else if(lobs >= 0) { (count->obs)[lobs]++; (count->nco)[lobs]++; } if(lcen >= count->n) ++(count->uppercen); else if(lcen >= 0) { (count->cen)[lcen]++; (count->ncc)[lcen]++; } } else { /* observation is censored; increment only two histograms */ lobs = MIN(lobs, lcen); if(lobs >= count->n) ++(count->upperobs); else if(lobs >= 0) (count->obs)[lobs]++; if(lcen >= count->n) ++(count->uppercen); else if(lcen >= 0) (count->cen)[lcen]++; } } } spatstat.explore/src/digber.c0000755000176200001440000000240414611073311015761 0ustar liggesusers/* digber.c Diggle-Berman function J used in bandwidth selection J(r) = \int_0^(2r) phi(t, r) dK(t) where K is the K-function and phi(t, r) = 2 r^2 * (acos(y) - y sqrt(1 - y^2)) where y = t/(2r). $Revision: 1.10 $ $Date: 2022/10/22 10:09:51 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include double sqrt(double x); double acos(double x); /* r is the vector of distance values, starting from 0, with length nr, equally spaced. dK = diff(K) is the vector of increments of the K-function, with length ndK = nr-1. values of J are computed only up to max(r)/2 nrmax = floor(nr/2). */ void digberJ( /* inputs */ double *r, double *dK, int *nr, int *nrmax, int *ndK, /* output */ double *J ) { int i, j, Ni, NdK; double ri, twori, tj, y, phiy, integral; Ni = *nrmax; NdK = *ndK; J[0] = 0.0; for(i = 1; i < Ni; i++) { ri = r[i]; twori = 2 * ri; integral = 0.0; for(j = 0; j < NdK; j++) { tj = r[j]; y = tj/twori; if(y >= 1.0) break; phiy = acos(y) - y * sqrt(1 - y * y); integral += phiy * dK[j]; } J[i] = 2 * ri * ri * integral; } } spatstat.explore/src/KrectFunDec.h0000755000176200001440000000531714611073311016675 0ustar liggesusers/* KrectFunDec.h $Revision: 1.5 $ $Date: 2022/10/21 10:43:01 $ Function declarations for Krect Macros: FNAME function name WEIGHTED #defined for weighted version (Kinhom etc) +++ Copyright (C) Adrian Baddeley 2014-2022 ++++ */ void FNAME( /* input data */ double *width, double *height, /* window is (0, width) x (0, height) */ int *nxy, /* number of (x,y) points */ double *x, double *y, /* (x,y) coordinates */ #ifdef WEIGHTED double *w, /* weights (e.g. 
reciprocal intensities) */ #endif /* algorithm parameters */ int *nr, /* number of r values */ double *rmax, /* maximum r value */ double *trimedge, /* maximum edge correction weight */ int *doIso, /* logical: whether to do isotropic correction */ int *doTrans, /* logical: whether to do translation correction */ int *doBord, /* logical: whether to do border correction */ int *doUnco, /* logical: whether to do uncorrected estimator */ /* outputs */ /* These are vectors of length nr if required, otherwise ignored */ double *iso, /* isotropic-corrected estimator */ double *trans, /* translation-corrected estimator */ COUNTTYPE *bnumer, /* numerator of border-corrected estimator */ COUNTTYPE *bdenom, /* denominator of border-corrected estimator */ COUNTTYPE *unco /* uncorrected estimator */ ) { int i, j, l, ldist, lbord, M, maxchunk, N, Nr, N1, Nr1; double rstep, Rmax, R2max, wide, high, trim; double xi, yi, bdisti, bx, by, bratio; double dx, dy, dx2, dij, dij2, dratio, edgetrans, edgeiso; double dL, dR, dD, dU, bLU, bLD, bRU, bRD, bUL, bUR, bDL, bDR; double aL, aR, aD, aU, cL, cR, cU, cD, extang; int ncor, corner; COUNTTYPE *numerLowAccum, *numerHighAccum, *denomAccum; COUNTTYPE naccum, daccum; double accum; #ifdef WEIGHTED double wi, wj, wij; #endif #ifdef WEIGHTED #define ZERO 0.0 #define WIJ wij #else #define ZERO 0 #define WIJ 1 #endif N = *nxy; if(N == 0) return; Nr = *nr; Rmax = *rmax; trim = *trimedge; N1 = N - 1; Nr1 = Nr - 1; R2max = Rmax * Rmax; rstep = Rmax/Nr1; wide = *width; high = *height; /* Allocate and initialise scratch space - for border correction, but do it in all cases to keep the compiler happy */ M = (*doBord == 1) ? Nr : 1; numerLowAccum = (COUNTTYPE *) R_alloc(M, sizeof(COUNTTYPE)); numerHighAccum = (COUNTTYPE *) R_alloc(M, sizeof(COUNTTYPE)); denomAccum = (COUNTTYPE *) R_alloc(M, sizeof(COUNTTYPE)); for(l = 0; l < M; l++) numerLowAccum[l] = numerHighAccum[l] = denomAccum[l] = ZERO; #include "KrectV1.h" } #undef ZERO #undef WIJ spatstat.explore/src/looptest.h0000755000176200001440000000030214611073311016376 0ustar liggesusers/* looptest.h Utilities for looping $Revision: 1.1 $ $Date: 2014/09/19 00:47:34 $ */ /* a small value relative to threshold X, for loop exit test */ #define EPSILON(X) ((X)/64) spatstat.explore/src/sphefrac.c0000755000176200001440000000650114611073311016322 0ustar liggesusers#include #include #include "geom3.h" /* $Revision: 1.4 $ $Date: 2022/10/22 10:04:19 $ Routine for calculating surface area of sphere intersected with box # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009, 2013, 2022 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. 
# ////////////////////////////////////////////// */ #ifdef DEBUG #define DBG(X,Y) Rprintf("%s: %f\n", (X), (Y)); #else #define DBG(X,Y) #endif static double pi = 3.141592653589793; /* Factor of 4 * pi * r * r IS ALREADY TAKEN OUT */ double sphesfrac( Point *point, Box *box, double r ) { double sum, p[4], q[4]; double a1(double t, double r); double a2(double t1, double t2, double r); double a3(double t1, double t2, double t3, double r); int i, j; p[1] = point->x - box->x0; p[2] = point->y - box->y0; p[3] = point->z - box->z0; q[1] = box->x1 - point->x; q[2] = box->y1 - point->y; q[3] = box->z1 - point->z; sum = 0; for(i = 1; i <= 3; i++) { sum += a1(p[i],r) + a1(q[i],r); #ifdef DEBUG Rprintf("i = %d, a1 = %f, a1 = %f\n", i, a1(p[i],r), a1(q[i],r)); #endif } DBG("Past a1", sum) for(i = 1; i < 3; i++) for(j = i+1; j <= 3; j++) { sum -= a2(p[i], p[j], r) + a2(p[i], q[j], r) + a2(q[i], p[j], r) + a2(q[i], q[j], r); #ifdef DEBUG Rprintf("i = %d, j = %d, sum = %f\n", i, j, sum); #endif } DBG("Past a2", sum) sum += a3(p[1], p[2], p[3], r) + a3(p[1], p[2], q[3], r); DBG("sum", sum) sum += a3(p[1], q[2], p[3], r) + a3(p[1], q[2], q[3], r); DBG("sum", sum) sum += a3(q[1], p[2], p[3], r) + a3(q[1], p[2], q[3], r); DBG("sum", sum) sum += a3(q[1], q[2], p[3], r) + a3(q[1], q[2], q[3], r); DBG("Past a3", sum) return(1 - sum); } double a1(double t, double r) { /* This is the function A1 divided by 4 pi r^2 */ if(t >= r) return(0.0); return((1 - t/r) * 0.5); } double a2(double t1, double t2, double r) { double c2(double a, double b); /* This is A2 divided by 4 pi r^2 because c2 is C divided by pi */ return(c2( t1 / r, t2 / r) / 2.0); } double a3(double t1, double t2, double t3, double r) { double c3(double a, double b, double c); /* This is A3 divided by 4 pi r^2 because c3 is C divided by pi */ return(c3(t1 / r, t2 / r, t3 / r) / 4.0); } double c2(double a, double b) { double z, z2; /* This is the function C(a, b, 0) divided by pi - assumes a, b > 0 */ if( ( z2 = 1.0 - a * a - b * b) < 0.0 ) return(0.0); z = sqrt(z2); return((atan2(z, a * b) - a * atan2(z, b) - b * atan2(z, a)) / pi); } double c3(double a, double b, double c) { double za, zb, zc, sum; /* This is C(a,b,c) divided by pi. 
Arguments assumed > 0 */ if(a * a + b * b + c * c >= 1.0) return(0.0); za = sqrt(1 - b * b - c * c); zb = sqrt(1 - a * a - c * c); zc = sqrt(1 - a * a - b * b); sum = atan2(zb, a * c) + atan2(za, b * c) + atan2(zc, a * b) - a * atan2(zb, c) + a * atan2(b, zc) - b * atan2(za, c) + b * atan2(a, zc) - c * atan2(zb, a) + c * atan2(b, za); return(sum / pi - 1); } spatstat.explore/src/localpcf.h0000755000176200001440000000454414611073311016324 0ustar liggesusers/* localpcf.h Source template for versions of local pair correlation Requires variable: WEIGHTED Assumes point patterns are sorted in increasing order of x coordinate $Revision: 1.7 $ $Date: 2022/10/21 10:43:01 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifdef WEIGHTED #define FNAME locWpcfx #else #define FNAME locpcfx #endif void FNAME( int *nn1, double *x1, double *y1, int *id1, int *nn2, double *x2, double *y2, int *id2, #ifdef WEIGHTED double *w2, #endif int *nnr, double *rmaxi, double *del, /* output */ double *pcf /* matrix of column vectors of pcf's for each point of first pattern */ ) { int n1, n2, nr, i, j, k, jleft, kmin, kmax, id1i, maxchunk; double x1i, y1i, rmax, delta, xleft, dx, dy, dx2; double d2, d2max, dmax, d; double rstep, rvalue, frac, contrib, weight, coef; n1 = *nn1; n2 = *nn2; nr = *nnr; rmax = *rmaxi; delta = *del; dmax = rmax + delta; /* maximum relevant value of interpoint distance */ d2max = dmax * dmax; rstep = rmax/(nr-1); coef = 3.0 /(4.0 * delta); if(n1 == 0 || n2 == 0) return; jleft = 0; OUTERCHUNKLOOP(i, n1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n1, maxchunk, 8196) { x1i = x1[i]; y1i = y1[i]; id1i = id1[i]; /* adjust starting point */ xleft = x1i - dmax; while((x2[jleft] < xleft) && (jleft+1 < n2)) ++jleft; /* process from jleft until |dx| > dmax */ for(j=jleft; j < n2; j++) { dx = x2[j] - x1i; dx2 = dx * dx; if(dx2 > d2max) break; dy = y2[j] - y1i; d2 = dx2 + dy * dy; if(d2 <= d2max && id2[j] != id1i) { d = sqrt(d2); kmin = (int) floor((d-delta)/rstep); kmax = (int) ceil((d+delta)/rstep); if(kmin <= nr-1 && kmax >= 0) { /* nonempty intersection with range of r values */ /* compute intersection */ if(kmin < 0) kmin = 0; if(kmax >= nr) kmax = nr-1; /* */ weight = coef/d; #ifdef WEIGHTED weight = weight * w2[j]; #endif for(k = kmin; k <= kmax; k++) { rvalue = k * rstep; frac = (d - rvalue)/delta; /* Epanechnikov kernel with halfwidth delta */ contrib = (1 - frac * frac); if(contrib > 0) pcf[k + nr * i] += contrib * weight; } } } } } } } #undef FNAME spatstat.explore/src/raster.c0000755000176200001440000000200414611073311016021 0ustar liggesusers/* raster.c shape_raster() initialise a Raster structure $Revision: 1.2 $ $Date: 2022/10/22 02:32:10 $ */ #include #include "raster.h" void shape_raster( /* the raster structure to be initialised */ Raster *ras, /* pointer to data storage for pixel values */ void *data, /* range of GRID COORDS excluding margin */ double xmin, double ymin, double xmax, double ymax, /* absolute dimensions of storage array */ int nrow, int ncol, /* margins for working */ int mrow, int mcol ) { ras->data = data; ras->nrow = nrow; ras->ncol = ncol; ras->length = nrow * ncol; ras->rmin = mrow; ras->rmax = nrow - mrow - 1; ras->cmin = mcol; ras->cmax = ncol - mcol - 1; ras->x0 = ras->xmin = xmin; ras->x1 = ras->xmax = xmax; ras->y0 = ras->ymin = ymin; ras->y1 = ras->ymax = ymax; ras->xstep = (xmax-xmin)/(ncol - 2 * mcol - 1); ras->ystep = (ymax-ymin)/(nrow - 2 * mrow - 1); /* Rprintf("xstep,ystep = 
%lf,%lf\n", ras->xstep,ras->ystep); */ } spatstat.explore/src/corrections.c0000755000176200001440000000256314611073311017065 0ustar liggesusers/* corrections.c Edge corrections $Revision: 1.17 $ $Date: 2021/10/25 10:18:31 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include #include #include #include "chunkloop.h" #include "yesno.h" #include "constants.h" /* This constant is defined in Rmath.h */ #define TWOPI M_2PI #define MIN(A,B) (((A) < (B)) ? (A) : (B)) #define BETWEEN(X,X0,X1) ((double) ( ( (X) - (X0) ) * ( (X) - (X1) ) ) <= 0.0) #define UNDER(X,Y,X0,Y0,X1,Y1) \ ((double) ( ( (Y1) - (Y0) ) * ( (X) - (X0) ) ) >= (double) ( ( (Y) - (Y0) ) * ( (X1) - (X0) ) ) ) #define UNDERNEATH(X,Y,X0,Y0,X1,Y1) \ ((((double) (X0)) < ((double) (X1))) ? UNDER(X,Y,X0,Y0,X1,Y1) : UNDER(X,Y,X1,Y1,X0,Y0)) #define TESTINSIDE(X,Y,X0,Y0,X1,Y1) \ (BETWEEN(X,X0,X1) && UNDERNEATH(X, Y, X0, Y0, X1, Y1)) /* C function ripleybox */ #undef DEBUGBOX #define RIPLEYFUN ripleybox #include "ripleybox.h" #undef RIPLEYFUN /* C function ripboxDebug */ #define DEBUGBOX #define RIPLEYFUN ripboxDebug #include "ripleybox.h" #undef RIPLEYFUN #undef DEBUGBOX /* C function ripleypoly */ #undef DEBUGPOLY #define RIPLEYFUN ripleypoly #include "ripleypoly.h" #undef RIPLEYFUN /* C function rippolDebug */ #define DEBUGPOLY #define RIPLEYFUN rippolDebug #include "ripleypoly.h" #undef RIPLEYFUN #undef DEBUGPOLY spatstat.explore/src/loccums.h0000755000176200001440000000411714611073311016202 0ustar liggesusers/* loccums.h C template for loccum.c data-to-data functions $Revision: 1.7 $ $Date: 2022/10/21 10:43:01 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 macros: FNAME function name NULVAL initial value (empty sum = 0, empty product = 1) INC(A,B) increment operation A += B or A *= B */ void FNAME( /* inputs */ int *n, double *x, double *y, double *v, int *nr, double *rmax, /* output */ double *ans /* matrix of column vectors of functions for each point */ ) { int N, Nr, Nans; double Rmax; int i, j, k, kmin, maxchunk, columnstart; double Rmax2, rstep, xi, yi; double dx, dy, dx2, d2, d, contrib; N = *n; Nr = *nr; Rmax = *rmax; if(N == 0) return; rstep = Rmax/(Nr-1); Rmax2 = Rmax * Rmax; Nans = Nr * N; /* initialise products to 1 */ OUTERCHUNKLOOP(k, Nans, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(k, Nans, maxchunk, 8196) { ans[k] = NULVAL; } } OUTERCHUNKLOOP(i, N, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 8196) { xi = x[i]; yi = y[i]; columnstart = Nr * i; /* start position for f_i(.) 
in 'ans' */ /* process backward until |dx| > Rmax */ if(i > 0) { for(j=i-1; j >= 0; j--) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 > Rmax2) break; dy = y[j] - yi; d2 = dx2 + dy * dy; if(d2 <= Rmax2) { d = sqrt(d2); kmin = (int) ceil(d/rstep); if(kmin < Nr) { contrib = v[j]; for(k = kmin; k < Nr; k++) INC(ans[columnstart + k] , contrib); } } } } /* process forward until |dx| > Rmax */ if(i < N - 1) { for(j=i+1; j < N; j++) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 > Rmax2) break; dy = y[j] - yi; d2 = dx2 + dy * dy; if(d2 <= Rmax2) { d = sqrt(d2); kmin = (int) ceil(d/rstep); if(kmin < Nr) { contrib = v[j]; for(k = kmin; k < Nr; k++) INC(ans[columnstart + k] , contrib); } } } } } } } spatstat.explore/src/KrectIncrem.h0000755000176200001440000000463614611073311016751 0ustar liggesusers/* KrectIncrem.h Code to increment numerators of K-function $Revision: 1.6 $ $Date: 2022/11/08 01:37:41 $ +++ Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2014 ++++ */ #ifdef WEIGHTED wj = w[j]; wij = wi * wj; #endif /* determine index of entry to be incremented */ dij = (double) sqrt(dij2); dratio = dij/rstep; /* smallest integer greater than or equal to dratio */ ldist = (int) ceil(dratio); #ifdef UNCORRECTED /* ............ uncorrected estimate ................. */ #ifdef WEIGHTED unco[ldist] += wij; #else (unco[ldist])++; #endif #endif #ifdef BORDER /* ............ border correction ................. */ /* increment numerator for all r such that dij <= r < bi */ /* increment entries ldist to lbord inclusive */ #ifdef WEIGHTED if(lbord >= ldist) { numerLowAccum[ldist] += wij; numerHighAccum[lbord] += wij; } #else if(lbord >= ldist) { (numerLowAccum[ldist])++; (numerHighAccum[lbord])++; } #endif #endif #ifdef TRANSLATION /* ............ translation correction ................. */ edgetrans = 1.0/((1.0 - ABS(dx)/wide) * (1.0 - ABS(dy)/high)); edgetrans = MIN(edgetrans, trim); #ifdef WEIGHTED trans[ldist] += wij * edgetrans; #else trans[ldist] += edgetrans; #endif #endif #ifdef ISOTROPIC /* ............ isotropic correction ................. */ /* half the angle subtended by the intersection between the circle of radius d[i,j] centred on point i and each edge of the rectangle (prolonged to an infinite line) */ aL = (dL < dij) ? acos(dL/dij) : 0.0; aR = (dR < dij) ? acos(dR/dij) : 0.0; aD = (dD < dij) ? acos(dD/dij) : 0.0; aU = (dU < dij) ? 
acos(dU/dij) : 0.0; /* apply maxima */ cL = MIN(aL, bLU) + MIN(aL, bLD); cR = MIN(aR, bRU) + MIN(aR, bRD); cU = MIN(aU, bUL) + MIN(aU, bUR); cD = MIN(aD, bDL) + MIN(aD, bDR); /* total exterior angle over 2 pi */ extang = (cL + cR + cU + cD)/TWOPI; /* add pi/2 for corners */ if(corner) extang += 1.0/4.0; /* edge correction factor */ edgeiso = 1.0 / (1.0 - extang); edgeiso = MIN(edgeiso, trim); #ifdef WEIGHTED iso[ldist] += wij * edgeiso; #else iso[ldist] += edgeiso; #endif #endif spatstat.explore/src/raster.h0000755000176200001440000000523014611073311016032 0ustar liggesusers/* raster.h Definition of raster structures & operations requires (for floor()) $Revision: 1.6 $ $Date: 2022/03/15 02:19:08 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ typedef struct Raster{ /* array of data */ char *data; /* coerced to appropriate type */ int nrow; /* dimensions of entire array */ int ncol; int length; int rmin; /* position of valid subrectangle */ int rmax; int cmin; int cmax; /* definition of mapping into continuous space */ double x0; /* position of entry (rmin,cmin) */ double y0; double x1; /* position of entry (rmax,cmax) */ double y1; double xstep; /* x increment for each column step */ double ystep; /* y increment for each row step */ /* xstep = (x1 - x0)/(cmax - cmin) = (x1 - x0)/(number of valid columns - 1) CAN BE POSITIVE OR NEGATIVE */ /* ranges of grid coordinates */ double xmin; /* = min{x0,x1} */ double xmax; double ymin; double ymax; /* limits of enclosing frame are xmin-xstep/2, xmax+xstep/2 etc. */ } Raster; /* how to clear the data */ #define Clear(ARRAY,TYPE,VALUE) \ { unsigned int i; TYPE *p; \ for(i = 0, p = (TYPE *) (ARRAY).data; i < (ARRAY).length; i++, p++) \ *p = VALUE; } /* how to index a rectangular array stored sequentially in row-major order */ #define Entry(ARRAY,ROW,COL,TYPE) \ ((TYPE *)((ARRAY).data))[COL + (ROW) * ((ARRAY).ncol)] /* test for indices inside subrectangle */ #define Inside(ARRAY,ROW,COL) \ ( (ROW >= (ARRAY).rmin) && (ROW <= (ARRAY).rmax) && \ (COL >= (ARRAY).cmin) && (COL <= (ARRAY).cmax)) /* how to compute the position in R^2 corresponding to a raster entry */ #define Xpos(ARRAY,COL) \ ((ARRAY).x0 + (ARRAY).xstep * (COL - (ARRAY).cmin)) #define Ypos(ARRAY,ROW) \ ((ARRAY).y0 + (ARRAY).ystep * (ROW - (ARRAY).rmin)) #define Distance(X,Y,XX,YY) sqrt((X - XX)* (X - XX) + (Y - YY) * (Y - YY)) #define DistanceTo(X,Y,ARRAY,ROW,COL)\ Distance(X,Y,Xpos(ARRAY,COL),Ypos(ARRAY,ROW)) #define DistanceSquared(X,Y,XX,YY) ((X - XX)* (X - XX) + (Y - YY) * (Y - YY)) #define DistanceToSquared(X,Y,ARRAY,ROW,COL)\ DistanceSquared(X,Y,Xpos(ARRAY,COL),Ypos(ARRAY,ROW)) /* how to map a point (x,y) in R^2 to a raster entry */ /* (x,y) is guaranteed to lie in the rectangle bounded by the images of the entries (r,c), (r+1,c), (r,c+1), (r+1,c+1) where r = RowIndex(..) and c = ColIndex(..). 
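   A worked example of this mapping (the numbers are purely illustrative):
   suppose y0 = 0.0, ystep = 0.5 and rmin = 0.  A point with Y = 1.2 then
   maps to row

       RowIndex = rmin + floor((1.2 - 0.0)/0.5) = 0 + floor(2.4) = 2

   and indeed Ypos(., 2) = 1.0 <= 1.2 < 1.5 = Ypos(., 3), so the point lies
   between the images of rows 2 and 3, consistent with the guarantee stated
   above.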
*/ #define RowIndex(ARRAY,Y) \ ((ARRAY).rmin + (int) floor(((Y) - (ARRAY).y0)/(ARRAY).ystep)) #define ColIndex(ARRAY,X) \ ((ARRAY).cmin + (int) floor(((X) - (ARRAY).x0)/(ARRAY).xstep)) spatstat.explore/src/KrectV3.h0000755000176200001440000000025314611073311016013 0ustar liggesusers/* KrectV4.h with or without border correction */ if((*doBord) == 1) { #define BORDER #include "KrectV4.h" } else { #undef BORDER #include "KrectV4.h" } spatstat.explore/src/KrectV2.h0000755000176200001440000000027314611073311016014 0ustar liggesusers/* KrectV3.h with or without translation correction */ if((*doTrans) == 1) { #define TRANSLATION #include "KrectV3.h" } else { #undef TRANSLATION #include "KrectV3.h" } spatstat.explore/src/ripleybox.h0000644000176200001440000000670014611073311016547 0ustar liggesusers/* ripleybox.h Ripley's edge correction for rectangular windows This file is #included multiple times in corrections.c Macros used: RIPLEYFUN Name of C function DEBUGBOX #defined if debugging information should be printed. *CHUNKLOOP defined in chunkloop.h TWOPI defined in Rmath.h $Revision: 1.5 $ $Date: 2022/11/08 01:37:31 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2019 Licence: GNU Public Licence >= 2 */ void RIPLEYFUN( /* coordinate vectors of length nx */ int *nx, double *x, double *y, /* matrix of radii nx by nr */ double *rmat, int *nr, /* box dimensions */ double *xmin, double *ymin, double *xmax, double *ymax, /* threshold for proximity to corner */ double *epsilon, /* output matrix nx by nr */ double *out ) { int i, j, n, m, ijpos, ncor, maxchunk; double xx, yy, x0, y0, x1, y1, dL, dR, dU, dD, aL, aU, aD, aR, rij; double cL, cU, cD, cR, bLU, bLD, bRU, bRD, bUL, bUR, bDL, bDR; double corner, extang; double eps; n = *nx; m = *nr; x0 = *xmin; y0 = *ymin; x1 = *xmax; y1 = *ymax; eps = *epsilon; OUTERCHUNKLOOP(i, n, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 16384) { xx = x[i]; yy = y[i]; /* perpendicular distance from point to each edge of rectangle L = left, R = right, D = down, U = up */ dL = xx - x0; dR = x1 - xx; dD = yy - y0; dU = y1 - yy; /* test for corner of the rectangle */ #define ABS(X) (((X) >= 0) ? (X) : (-X)) #define SMALL(X) ((ABS(X) < eps) ? 1 : 0) ncor = SMALL(dL) + SMALL(dR) + SMALL(dD) + SMALL(dU); corner = (ncor >= 2) ? YES : NO; /* angle between - perpendicular to edge of rectangle and - line from point to corner of rectangle */ bLU = atan2(dU, dL); bLD = atan2(dD, dL); bRU = atan2(dU, dR); bRD = atan2(dD, dR); bUL = atan2(dL, dU); bUR = atan2(dR, dU); bDL = atan2(dL, dD); bDR = atan2(dR, dD); for(j = 0; j < m; j++) { ijpos = j * n + i; rij = rmat[ijpos]; #ifdef DEBUGBOX Rprintf("rij = %lf\n", rij); #endif if(rij == 0.0) { /* Circle of radius 0 */ out[ijpos] = 1.0; } else { /* Fraction of circle Compute half the angle subtended by the intersection between the circle of radius r[i,j] centred on point i and each edge of the rectangle (prolonged to an infinite line) */ aL = (dL < rij) ? acos(dL/rij) : 0.0; aR = (dR < rij) ? acos(dR/rij) : 0.0; aD = (dD < rij) ? acos(dD/rij) : 0.0; aU = (dU < rij) ? 
acos(dU/rij) : 0.0; #ifdef DEBUGBOX Rprintf("aL = %lf\n", aL); Rprintf("aR = %lf\n", aR); Rprintf("aD = %lf\n", aD); Rprintf("aU = %lf\n", aU); #endif /* apply maxima */ cL = MIN(aL, bLU) + MIN(aL, bLD); cR = MIN(aR, bRU) + MIN(aR, bRD); cU = MIN(aU, bUL) + MIN(aU, bUR); cD = MIN(aD, bDL) + MIN(aD, bDR); #ifdef DEBUGBOX Rprintf("cL = %lf\n", cL); Rprintf("cR = %lf\n", cR); Rprintf("cD = %lf\n", cD); Rprintf("cU = %lf\n", cU); #endif /* total exterior angle over 2 pi */ extang = (cL + cR + cU + cD)/TWOPI; #ifdef DEBUGBOX Rprintf("extang = %lf\n", extang); #endif /* add pi/2 for corners */ if(corner) { extang += 1.0/4.0; #ifdef DEBUGBOX Rprintf("extang = %lf\n", extang); #endif } /* OK, now compute weight */ out[ijpos] = 1.0 / (1.0 - extang); } } } } } spatstat.explore/src/KrectV1.h0000755000176200001440000000026314611073311016012 0ustar liggesusers/* KrectV2.h with or without isotropic correction */ if((*doIso) == 1) { #define ISOTROPIC #include "KrectV2.h" } else { #undef ISOTROPIC #include "KrectV2.h" } spatstat.explore/src/loccum.c0000755000176200001440000000304214611073311016006 0ustar liggesusers#include #include #include #include "chunkloop.h" /* loccum.c $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Compute local cumulative sums or products of weights locsum: f_i(t) = \sum_{j: j \neq i, ||x_j - x_i|| \le t} v(x_j) for a data point pattern {x_i} locxsum: f_u(t) = \sum_{||x_i - u|| \le t} v(x_i) for a grid of points {u} and a data point pattern {x_i} (also works if {u} is another point pattern) locprod: f_i(t) = \prod_{j: j \neq i, ||x_j - x_i|| \le t} v(x_j) for a data point pattern {x_i} locxprod: f_u(t) = \prod_{||x_i - u|| \le t} v(x_i) for a grid of points {u} and a data point pattern {x_i} (also works if {u} is another point pattern) Assumes point patterns are sorted in increasing order of x coordinate Uses C code template files : loccums.h, loccumx.h */ /* data-to-data */ #undef FNAME #undef NULVAL #undef INC #define FNAME locsum #define NULVAL 0.0 #define INC(A,B) A += B #include "loccums.h" #undef FNAME #undef NULVAL #undef INC #define FNAME locprod #define NULVAL 1.0 #define INC(A,B) A *= B #include "loccums.h" /* test-grid-to-data */ #undef FNAME #undef NULVAL #undef INC #define FNAME locxsum #define NULVAL 0.0 #define INC(A,B) A += B #include "loccumx.h" #undef FNAME #undef NULVAL #undef INC #define FNAME locxprod #define NULVAL 1.0 #define INC(A,B) A *= B #include "loccumx.h" spatstat.explore/src/KrectV4.h0000755000176200001440000000027514611073311016020 0ustar liggesusers/* KrectV5.h with or without uncorrected estimator */ if((*doUnco) == 1) { #define UNCORRECTED #include "KrectBody.h" } else { #undef UNCORRECTED #include "KrectBody.h" } spatstat.explore/src/chunkloop.h0000755000176200001440000000161514611073311016537 0ustar liggesusers/* chunkloop.h Divide a loop into chunks Convenient for divide-and-recombine, and reducing calls to R_CheckUserInterrupt, etc. 
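   A minimal usage sketch (this mirrors the pattern actually used in
   loccums.h, localpcf.h and pairloop.h in this package; do_something()
   is a hypothetical placeholder and n is the total loop length):

     int i, maxchunk;
     OUTERCHUNKLOOP(i, n, maxchunk, 8196) {
       R_CheckUserInterrupt();
       INNERCHUNKLOOP(i, n, maxchunk, 8196) {
         do_something(i);
       }
     }

   The outer loop advances through 0 <= i < n in chunks of at most 8196
   iterations, so R_CheckUserInterrupt() is called once per chunk rather
   than once per iteration.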
$Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define OUTERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \ IVAR = 0; \ ICHUNK = 0; \ while(IVAR < LOOPLENGTH) #define INNERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \ ICHUNK += CHUNKSIZE; \ if(ICHUNK > LOOPLENGTH) ICHUNK = LOOPLENGTH; \ for(; IVAR < ICHUNK; IVAR++) #define XOUTERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE) \ IVAR = ISTART; \ ICHUNK = 0; \ while(IVAR <= IEND) #define XINNERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE) \ ICHUNK += CHUNKSIZE; \ if(ICHUNK > IEND) ICHUNK = IEND; \ for(; IVAR <= IEND; IVAR++) #define CHUNKLOOP_H spatstat.explore/src/geom3.h0000755000176200001440000000041014611073311015537 0ustar liggesusers/* $Revision: 1.1 $ $Date: 2009/11/04 23:54:15 $ Definitions for 3D geometrical structures */ typedef struct Point { double x; double y; double z; } Point; typedef struct Box { double x0; double x1; double y0; double y1; double z0; double z1; } Box; spatstat.explore/src/pairloop.h0000755000176200001440000000344714611073311016367 0ustar liggesusers/* pairloop.h Generic code template for loop collecting contributions to point x_i from all points x_j such that ||x_i - x_j|| <= r cpp variables used: INITIAL_I code executed at start of 'i' loop CONTRIBUTE_IJ code executed to compute contribution from j to i COMMIT_I code executed to save total contribution to i C variables used: int i, j, n, maxchunk; double xi, yi, dx, dy, dx2, d2, r2max; double *x, *y; $Revision: 1.5 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifndef CHUNKLOOP_H #include "chunkloop.h" #endif #define PAIRLOOP(INITIAL_I, CONTRIBUTE_IJ, COMMIT_I) \ OUTERCHUNKLOOP(i, n, maxchunk, 65536) { \ R_CheckUserInterrupt(); \ INNERCHUNKLOOP(i, n, maxchunk, 65536) { \ \ xi = x[i]; \ yi = y[i]; \ \ INITIAL_I; \ \ if(i > 0) { \ for(j=i-1; j >= 0; j--) { \ dx = x[j] - xi; \ dx2 = dx * dx; \ if(dx2 > r2max) \ break; \ dy = y[j] - yi; \ d2 = dx2 + dy * dy; \ if(d2 <= r2max) { \ CONTRIBUTE_IJ; \ } \ } \ } \ \ if(i+1 < n) { \ for(j=i+1; j < n; j++) { \ dx = x[j] - xi; \ dx2 = dx * dx; \ if(dx2 > r2max) \ break; \ dy = y[j] - yi; \ d2 = dx2 + dy * dy; \ if(d2 <= r2max) { \ CONTRIBUTE_IJ; \ } \ } \ } \ COMMIT_I; \ } \ } spatstat.explore/src/scan.c0000755000176200001440000000411414611073311015451 0ustar liggesusers/* scan.c Scan transform $Revision: 1.6 $ $Date: 2022/10/22 09:29:51 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include #include "raster.h" void shape_raster(Raster *ras, void *data, double xmin, double ymin, double xmax, double ymax, int nrow, int ncol, int mrow, int mcol); void Cscantrans( double *x, double *y, /* data points */ int npt, double R, /* radius */ Raster *out /* scan image */ ) { int i,j,k,l,m; double d2, R2; int rmin, rmax, cmin, cmax, Rrow, Rcol, lmin, lmax, mmin, mmax; /* initialise raster */ Clear(*out,int,0); /* If the list of data points is empty, ... 
exit now */ if(npt == 0) return; R2 = R * R; cmin = out->cmin; cmax = out->cmax; rmin = out->rmin; rmax = out->rmax; /* disc size in rows/columns */ Rrow = (int) ceil(R/(out->ystep)); Rcol = (int) ceil(R/(out->xstep)); if(Rrow < 1) Rrow = 1; if(Rcol < 1) Rcol = 1; /* run through points */ for(i = 0; i < npt; i++) { j = RowIndex(*out,y[i]); k = ColIndex(*out,x[i]); lmin = j - Rrow; if(lmin < rmin) lmin = rmin; lmax = j + Rrow; if(lmax > rmax) lmax = rmax; mmin = k - Rcol; if(mmin < cmin) mmin = cmin; mmax = k + Rcol; if(mmax > cmax) mmax = cmax; for(l = lmin; l <= lmax; l++) { for(m = mmin; m <= mmax; m++) { d2 = DistanceToSquared(x[i],y[i],*out,l,m); if(d2 <= R2) Entry(*out,l,m,int) += 1; } } } } /* R interface */ void scantrans( double *x, double *y, /* input data points */ int *n, double *xmin, double *ymin, double *xmax, double *ymax, /* guaranteed bounding box */ int *nr, int *nc, /* desired raster dimensions */ double *R, /* radius */ /* output array */ int *counts /* number of R-close points */ ) { Raster out; int nrow, ncol, npoints; double r; nrow = *nr; ncol = *nc; npoints = *n; r = *R; shape_raster( &out, (void *) counts, *xmin,*ymin,*xmax,*ymax, nrow, ncol, 0, 0); Cscantrans(x, y, npoints, r, &out); } spatstat.explore/src/f3.c0000755000176200001440000002440414611073311015041 0ustar liggesusers#include #include #include #include "geom3.h" #include "functable.h" #ifdef DEBUG #define DEBUGMESSAGE(S) Rprintf(S); #else #define DEBUGMESSAGE(S) #endif /* $Revision: 1.4 $ $Date: 2016/10/23 04:24:03 $ 3D distance transform # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009, 2022 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ /* step lengths in distance transform */ #define STEP1 41 #define STEP2 58 #define STEP3 71 /* (41,58,71)/41 is a good rational approximation to (1, sqrt(2), sqrt(3)) */ #define MIN(X,Y) (((X) < (Y)) ? (X) : (Y)) #define MAX(X,Y) (((X) > (Y)) ? 
(X) : (Y)) typedef struct IntImage { int *data; int Mx, My, Mz; /* dimensions */ int length; } IntImage; typedef struct BinaryImage { unsigned char *data; int Mx, My, Mz; /* dimensions */ int length; } BinaryImage; #define VALUE(I,X,Y,Z) \ ((I).data)[ (Z) * ((I).Mx) * ((I).My) + (Y) * ((I).Mx) + (X) ] void allocBinImage( BinaryImage *b, int *ok ) { b->length = b->Mx * b->My * b->Mz; b->data = (unsigned char *) R_alloc(b->length, sizeof(unsigned char)); if(b->data == 0) { Rprintf("Can't allocate memory for %d binary voxels\n", b->length); *ok = 0; } *ok = 1; } void allocIntImage( IntImage *v, int *ok ) { v->length = v->Mx * v->My * v->Mz; v->data = (int *) R_alloc(v->length, sizeof(int)); if(v->data == 0) { Rprintf("Can't allocate memory for %d integer voxels\n", v->length); *ok = 0; } *ok = 1; } void freeBinImage(BinaryImage *b) { } void freeIntImage(IntImage *v) { } void cts2bin( /* convert a list of points inside a box into a 3D binary image */ Point *p, int n, Box *box, double vside, /* side of a (cubic) voxel */ BinaryImage *b, int *ok ) { int i, lx, ly, lz; unsigned char *cp; b->Mx = (int) ceil((box->x1 - box->x0)/vside) + 1; b->My = (int) ceil((box->y1 - box->y0)/vside) + 1; b->Mz = (int) ceil((box->z1 - box->z0)/vside) + 1; allocBinImage(b, ok); if(! (*ok)) return; for(i = b->length, cp = b->data; i ; i--, cp++) *cp = 1; for(i=0;ix0)/vside)-1; ly = (int) ceil((p[i].y - box->y0)/vside)-1; lz = (int) ceil((p[i].z - box->z0)/vside)-1; if( lx >= 0 && lx < b->Mx && ly >= 0 && ly < b->My && lz >= 0 && lz < b->Mz ) VALUE((*b),lx,ly,lz) = 0; } } void distrans3( /* Distance transform in 3D */ BinaryImage *b, /* input */ IntImage *v, /* output */ int *ok ) { register int x, y, z; int infinity, q; /* allocate v same size as b */ v->Mx = b->Mx; v->My = b->My; v->Mz = b->Mz; allocIntImage(v, ok); if(! (*ok)) return; /* compute largest possible distance */ infinity = (int) ceil( ((double) STEP3) * sqrt( ((double) b->Mx) * b->Mx + ((double) b->My) * b->My + ((double) b->Mz) * b->Mz)); /* Forward pass: Top to Bottom; Back to Front; Left to Right. */ for(z=0;zMz;z++) { R_CheckUserInterrupt(); for(y=0;yMy;y++) { for(x=0;xMx;x++) { if(VALUE((*b),x,y,z) == 0) VALUE((*v),x,y,z) = 0; else { q = infinity; #define INTERVAL(W, DW, MW) \ ((DW == 0) || (DW == -1 && W > 0) || (DW == 1 && W < MW - 1)) #define BOX(X,Y,Z,DX,DY,DZ) \ (INTERVAL(X,DX,v->Mx) && INTERVAL(Y,DY,v->My) && INTERVAL(Z,DZ,v->Mz)) #define TEST(DX,DY,DZ,DV) \ if(BOX(x,y,z,DX,DY,DZ) && q > VALUE((*v),x+DX,y+DY,z+DZ) + DV) \ q = VALUE((*v),x+DX,y+DY,z+DZ) + DV /* same row */ TEST(-1, 0, 0, STEP1); /* same plane */ TEST(-1,-1, 0, STEP2); TEST( 0,-1, 0, STEP1); TEST( 1,-1, 0, STEP2); /* previous plane */ TEST( 1, 1,-1, STEP3); TEST( 0, 1,-1, STEP2); TEST(-1, 1,-1, STEP3); TEST( 1, 0,-1, STEP2); TEST( 0, 0,-1, STEP1); TEST(-1, 0,-1, STEP2); TEST( 1,-1,-1, STEP3); TEST( 0,-1,-1, STEP2); TEST(-1,-1,-1, STEP3); VALUE((*v),x,y,z) = q; } } } } /* Backward pass: Bottom to Top; Front to Back; Right to Left. 
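      After this second pass, each entry of v holds the chamfer (41,58,71)
      approximation to the distance from that voxel to the nearest data
      voxel (zero at the data voxels themselves).  Distances are in units
      of STEP1 per voxel side, so they are converted to physical units by
      multiplying by vside/STEP1, as done via 'scale' in hist3d(),
      hist3dminus() and hist3dCen() below.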
*/ for(z = b->Mz - 1; z >= 0; z--) { R_CheckUserInterrupt(); for(y = b->My - 1; y >= 0; y--) { for(x = b->Mx - 1; x >= 0; x--) { if((q = VALUE((*v),x,y,z)) != 0) { /* same row */ TEST(1, 0, 0, STEP1); /* same plane */ TEST(-1, 1, 0, STEP2); TEST( 0, 1, 0, STEP1); TEST( 1, 1, 0, STEP2); /* plane below */ TEST( 1, 1, 1, STEP3); TEST( 0, 1, 1, STEP2); TEST(-1, 1, 1, STEP3); TEST( 1, 0, 1, STEP2); TEST( 0, 0, 1, STEP1); TEST(-1, 0, 1, STEP2); TEST( 1,-1, 1, STEP3); TEST( 0,-1, 1, STEP2); TEST(-1,-1, 1, STEP3); VALUE((*v),x,y,z) = q; } } } } } void hist3d( /* compute histogram of all values in *v using count->n histogram cells ranging from count->t0 to count->t1 and put results in count->num */ IntImage *v, double vside, Itable *count ) { register int i, j, k; register int *ip; register double scale, width; /* relationship between distance transform units and physical units */ scale = vside/STEP1; width = (count->t1 - count->t0)/(count->n - 1); for(i = 0; i < count->n ; i++) { (count->num)[i] = 0; (count->denom)[i] = v->length; } for(i = v->length, ip = v->data; i; i--, ip++) { k = (int) ceil((*ip * scale - count->t0)/width); k = MAX(k, 0); for(j = k; j < count->n; j++) (count->num)[j]++; } } void hist3dminus( /* minus sampling */ IntImage *v, double vside, Itable *count ) { register int x, y, z, val, border, bx, by, bz, byz, j, kbord, kval; register double scale, width; DEBUGMESSAGE("inside hist3dminus\n") scale = vside/STEP1; width = (count->t1 - count->t0)/(count->n - 1); /* table is assumed to have been initialised in MakeItable */ for(z = 0; z < v->Mz; z++) { bz = MIN(z + 1, v->Mz - z); for(y = 0; y < v->My; y++) { by = MIN(y + 1, v->My - y); byz = MIN(by, bz); for(x = 0; x < v->Mx; x++) { bx = MIN(x + 1, v->My - x); border = MIN(bx, byz); kbord = (int) floor((vside * border - count->t0)/width); kbord = MIN(kbord, count->n - 1); /* denominator counts all voxels with distance to boundary >= r */ if(kbord >= 0) for(j = 0; j <= kbord; j++) (count->denom)[j]++; val = VALUE((*v), x, y, z); kval = (int) ceil((val * scale - count->t0)/width); kval = MAX(kval, 0); #ifdef DEBUG /* Rprintf("border=%lf\tkbord=%d\tval=%lf\tkval=%d\n", vside * border, kbord, scale * val, kval); */ #endif /* numerator counts all voxels with distance to boundary >= r and distance to nearest point <= r */ if(kval <= kbord) for(j = kval; j <= kbord; j++) (count->num)[j]++; } } } DEBUGMESSAGE("leaving hist3dminus\n") } void hist3dCen( /* four censoring-related histograms */ IntImage *v, double vside, H4table *count ) { register int x, y, z, val, border, bx, by, bz, byz, kbord, kval; register double scale, width, realborder, realval; DEBUGMESSAGE("inside hist3dCen\n") scale = vside/STEP1; width = (count->t1 - count->t0)/(count->n - 1); /* table is assumed to have been initialised in MakeH4table */ for(z = 0; z < v->Mz; z++) { bz = MIN(z + 1, v->Mz - z); for(y = 0; y < v->My; y++) { by = MIN(y + 1, v->My - y); byz = MIN(by, bz); for(x = 0; x < v->Mx; x++) { bx = MIN(x + 1, v->My - x); border = MIN(bx, byz); realborder = vside * border; kbord = (int) floor((realborder - count->t0)/width); val = VALUE((*v), x, y, z); realval = scale * val; kval = (int) ceil((realval - count->t0)/width); /* this could exceed array limits; that will be detected below */ #ifdef DEBUG Rprintf("border=%lf\tkbord=%d\tval=%lf\tkval=%d\n", realborder, kbord, realval, kval); #endif if(realval <= realborder) { /* observation is uncensored; increment all four histograms */ if(kval >= count->n) ++(count->upperobs); else if(kval >= 0) { 
(count->obs)[kval]++; (count->nco)[kval]++; } if(kbord >= count->n) ++(count->uppercen); else if(kbord >= 0) { (count->cen)[kbord]++; (count->ncc)[kbord]++; } } else { /* observation is censored; increment only two histograms */ kval = MIN(kval, kbord); if(kval >= count->n) ++(count->upperobs); else if(kval >= 0) (count->obs)[kval]++; if(kbord >= count->n) ++(count->uppercen); else if(kbord >= 0) (count->cen)[kbord]++; } } } } DEBUGMESSAGE("leaving hist3dCen\n") } /* CALLING ROUTINES */ void phatminus( Point *p, int n, Box *box, double vside, Itable *count ) { BinaryImage b; IntImage v; int ok; DEBUGMESSAGE("in phatminus\ncalling cts2bin...") cts2bin(p, n, box, vside, &b, &ok); DEBUGMESSAGE("out of cts2bin\ninto distrans3...") if(ok) distrans3(&b, &v, &ok); if(ok) { freeBinImage(&b); DEBUGMESSAGE("out of distrans3\ninto hist3dminus...") hist3dminus(&v, vside, count); DEBUGMESSAGE("out of hist3dminus\n") freeIntImage(&v); } } void phatnaive( Point *p, int n, Box *box, double vside, Itable *count ) { BinaryImage b; IntImage v; int ok; DEBUGMESSAGE("in phatnaive\ncalling cts2bin...") cts2bin(p, n, box, vside, &b, &ok); DEBUGMESSAGE("out of cts2bin\n into distrans3...") if(ok) distrans3(&b, &v, &ok); if(ok) { freeBinImage(&b); DEBUGMESSAGE("out of distrans3\ninto hist3d..."); hist3d(&v, vside, count); DEBUGMESSAGE("out of hist3d\n") freeIntImage(&v); } } void p3hat4( Point *p, int n, Box *box, double vside, H4table *count ) { BinaryImage b; IntImage v; int ok; DEBUGMESSAGE("in phatminus\ncalling cts2bin...") cts2bin(p, n, box, vside, &b, &ok); DEBUGMESSAGE("out of cts2bin\ninto distrans3...") if(ok) distrans3(&b, &v, &ok); if(ok) { freeBinImage(&b); DEBUGMESSAGE("out of distrans3\ninto hist3dminus...") hist3dCen(&v, vside, count); DEBUGMESSAGE("out of hist3dminus\n") freeIntImage(&v); } } spatstat.explore/src/proto.h0000644000176200001440000001547014733462622015715 0ustar liggesusers#include #include /* Prototype declarations for all native routines in spatstat.explore package Automatically generated - do not edit! 
*/ /* Functions invoked by .C */ void digberJ(double *, double *, int *, int *, int *, double *); void Gdenspt(int *, double *, double *, double *, double *); void Gwtdenspt(int *, double *, double *, double *, double *, double *); void Gwtdenspt(int *, double *, double *, double *, double *, double *); void denspt(int *, double *, double *, double *, double *, int *, double *); void wtdenspt(int *, double *, double *, double *, double *, double *, int *, double *); void wtdenspt(int *, double *, double *, double *, double *, double *, int *, double *); void adenspt(int *, double *, double *, double *, double *, double *, int *, double *); void awtdenspt(int *, double *, double *, double *, double *, double *, double *, int *, double *); void awtdenspt(int *, double *, double *, double *, double *, double *, double *, int *, double *); void crdenspt(int *, double *, double *, int *, double *, double *, double *, double *, int *, double *); void wtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, int *, double *); void wtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, int *, double *); void acrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, int *, double *); void awtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, int *, double *); void awtcrdenspt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, int *, double *); void segdens(double *, int *, double *, double *, double *, double *, int *, double *, double *, double *); void segwdens(double *, int *, double *, double *, double *, double *, double *, int *, double *, double *, double *); void ripleybox(int *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void ripboxDebug(int *, double *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void ripleypoly(int *, double *, double *, double *, int *, double *, int *, double *, double *, double *, double *, double *); void rippolDebug(int *, double *, double *, double *, int *, double *, int *, double *, double *, double *, double *, double *); void RcallK3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, double *, double *, double *, int *); void RcallG3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, double *, double *, double *, int *); void RcallF3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *); void RcallF3cen(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *, int *, int *, int *); void RcallG3cen(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *, int *, int *, int *); void Rcallpcf3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, int *, double *, double *, double *, int *, double *); void RcallF3(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *, double *, double *, int *, int *, int *, int *); void 
locxprod(int *, double *, double *, int *, double *, double *, double *, int *, double *, double *); void Cidw(double *, double *, double *, int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, double *); void Cidw2(double *, double *, double *, int *, double *, double *, int *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void idwloo(double *, double *, double *, int *, double *, double *, double *, double *); void idwloo2(double *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void locprod(int *, double *, double *, double *, int *, double *, double *); void locxprod(int *, double *, double *, int *, double *, double *, double *, int *, double *, double *); void KborderI(int *, double *, double *, double *, int *, double *, int *, int *); void KborderD(int *, double *, double *, double *, int *, double *, double *, double *); void Kwborder(int *, double *, double *, double *, double *, int *, double *, double *, double *); void KnoneI(int *, double *, double *, int *, double *, int *); void KnoneD(int *, double *, double *, int *, double *, double *); void Kwnone(int *, double *, double *, double *, int *, double *, double *); void KrectWtd(double *, double *, int *, double *, double *, double *, int *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *); void KrectInt(double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, int *, double *, double *, int *, int *, int *); void KrectDbl(double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *); void locpcfx(int *, double *, double *, int *, int *, double *, double *, int *, int *, double *, double *, double *); void locWpcfx(int *, double *, double *, int *, int *, double *, double *, int *, double *, int *, double *, double *, double *); void scantrans(double *, double *, int *, double *, double *, double *, double *, int *, int *, double *, int *); void Gsmoopt(int *, double *, double *, double *, int *, double *, double *); void Gwtsmoopt(int *, double *, double *, double *, int *, double *, double *, double *); void smoopt(int *, double *, double *, double *, int *, double *, double *, double *); void wtsmoopt(int *, double *, double *, double *, int *, double *, double *, double *, double *); void asmoopt(int *, double *, double *, double *, int *, double *, double *, double *); void awtsmoopt(int *, double *, double *, double *, int *, double *, double *, double *, double *); void crsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void wtcrsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *); void acrsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *); void awtcrsmoopt(int *, double *, double *, int *, double *, double *, double *, double *, double *, double *, double *); /* Functions invoked by .Call */ spatstat.explore/src/segdens.c0000755000176200001440000000502414611073311016156 0ustar liggesusers#include #include #include #include /* segdens.c Convolution of segments with Gaussian kernel Entry points: segdens unweighted segwdens weighted Copyright (c) Adrian Baddeley, 02 dec 2016 (modified 01 mar 2021) Licence: GPL >= 2.0 $Revision: 1.8 $ $Date: 2022/10/23 05:56:57 $ */ 
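/*
   What segdens() and segwdens() compute, in brief: for each query point
   (xp[j], yp[j]) and each segment with first endpoint (xs[i], ys[i]),
   angle alps[i] and length lens[i], the displacement from the endpoint
   to the query point is rotated into segment coordinates u1 (along the
   segment) and u2 (perpendicular to it), and the increment

       dnorm(u2, 0, sigma) * ( pnorm(u1, 0, sigma) - pnorm(u1 - lens[i], 0, sigma) )

   is added to z[j] (multiplied by the weight ws[i] in segwdens).  This is
   the exact integral, along the segment, of the isotropic Gaussian density
   with standard deviation sigma centred at the query point.

   A minimal illustrative call from C (the numbers are hypothetical; within
   the package these routines are invoked from R via the .C() interface
   registered in init.c):

     double sigma = 0.1;
     double xs = 0.0, ys = 0.0, alps = 0.0, lens = 1.0;
     double xp = 0.5, yp = 0.2, z = 0.0;
     int ns = 1, np = 1;
     segdens(&sigma, &ns, &xs, &ys, &alps, &lens, &np, &xp, &yp, &z);

   after which z equals
   dnorm(0.2, 0, 0.1) * (pnorm(0.5, 0, 0.1) - pnorm(-0.5, 0, 0.1)).
*/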
#define DNORM(X, SIG) dnorm((X), (double) 0.0, (SIG), FALSE) #define PNORM(X, SIG) pnorm((X), (double) 0.0, (SIG), TRUE, FALSE) void segdens( double *sigma, /* bandwidth */ /* input line segments */ int *ns, /* number of line segments */ double *xs, double *ys, double *alps, double *lens, /* first endpoint, angle, length */ /* query locations */ int *np, /* number of pixels or test locations */ double *xp, double *yp, /* coordinates of pixels or test locations */ /* output */ double *z /* result, assumed initially 0 */ ) { int i, j, Ns, Np; double Sigma; double xsi, ysi, angi, leni, cosi, sini; double dx, dy, u1, u2; Ns = *ns; Np = *np; Sigma = *sigma; for(i = 0; i < Ns; i++) { R_CheckUserInterrupt(); xsi = xs[i]; ysi = ys[i]; angi = alps[i]; leni = lens[i]; cosi = cos(angi); sini = sin(angi); for(j = 0; j < Np; j++) { dx = xp[j] - xsi; dy = yp[j] - ysi; u1 = dx * cosi + dy * sini; u2 = -dx * sini + dy * cosi; z[j] += DNORM(u2, Sigma) * (PNORM(u1, Sigma) - PNORM(u1-leni, Sigma)); } } } void segwdens( double *sigma, /* bandwidth */ /* input line segments */ int *ns, /* number of line segments */ double *xs, double *ys, double *alps, double *lens, /* first endpoint, angle, length */ double *ws, /* segment weights */ /* query locations */ int *np, /* number of pixels or test locations */ double *xp, double *yp, /* coordinates of pixels or test locations */ /* output */ double *z /* result, assumed initially 0 */ ) { int i, j, Ns, Np; double Sigma; double xsi, ysi, angi, leni, cosi, sini, wi; double dx, dy, u1, u2; Ns = *ns; Np = *np; Sigma = *sigma; for(i = 0; i < Ns; i++) { R_CheckUserInterrupt(); xsi = xs[i]; ysi = ys[i]; angi = alps[i]; leni = lens[i]; wi = ws[i]; cosi = cos(angi); sini = sin(angi); for(j = 0; j < Np; j++) { dx = xp[j] - xsi; dy = yp[j] - ysi; u1 = dx * cosi + dy * sini; u2 = -dx * sini + dy * cosi; z[j] += wi * DNORM(u2, Sigma) * (PNORM(u1, Sigma) - PNORM(u1-leni, Sigma)); } } } spatstat.explore/src/Kborder.h0000755000176200001440000001075414611073311016131 0ustar liggesusers/* Kborder.h Code template for K function estimators in Kborder.c Variables: FNAME function name OUTTYPE storage type of the output vectors ('int' or 'double') WEIGHTED #defined for weighted (inhom) K function Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.13 $ $Date: 2022/10/21 10:43:01 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void FNAME( /* inputs */ int *nxy, double *x, double *y, #ifdef WEIGHTED double *w, #endif double *b, int *nr, double *rmax, /* outputs */ OUTTYPE *numer, OUTTYPE *denom ) { int i, j, l, n, nt, n1, nt1, lmin, lmax, maxchunk; double dt, tmax, xi, yi, bi, maxsearch, max2search; double bratio, dratio, dij, dij2, dx, dy, dx2; OUTTYPE *numerLowAccum, *numerHighAccum, *denomAccum; OUTTYPE naccum, daccum; #ifdef WEIGHTED double wi, wj, wij; #endif #ifdef WEIGHTED #define ZERO 0.0 #define WI wi #define WJ wj #define WIJ wij #else #define ZERO 0 #define WI 1 #define WJ 1 #define WIJ 1 #endif n = *nxy; nt = *nr; n1 = n - 1; nt1 = nt - 1; dt = (*rmax)/(nt-1); tmax = *rmax; /* initialise */ numerLowAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE)); numerHighAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE)); denomAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE)); for(l = 0; l < nt; l++) numer[l] = denom[l] = numerLowAccum[l] = numerHighAccum[l] = denomAccum[l] = ZERO; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); 
maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { /* -------- DENOMINATOR -------------*/ bi = b[i]; #ifdef WEIGHTED wi = w[i]; #endif /* increment denominator for all r < b[i] */ bratio = bi/dt; /* lmax is the largest integer STRICTLY less than bratio */ lmax = (int) ceil(bratio) - 1; lmax = (lmax <= nt1) ? lmax : nt1; /* effectively increment entries 0 to lmax */ if(lmax >= 0) denomAccum[lmax] += WI; /* ---------- NUMERATOR -----------*/ /* scan through points (x[j],y[j]) */ xi = x[i]; yi = y[i]; maxsearch = (bi < tmax) ? bi : tmax; max2search = maxsearch * maxsearch; /* scan backward from i-1 until |x[j]-x[i]| > maxsearch or until we run out */ if(i > 0) { for(j=i-1; j >= 0; j--) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= max2search) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < max2search) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r such that dij <= r < bi */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* increment entries lmin to lmax inclusive */ if(lmax >= lmin) { #ifdef WEIGHTED wij = wi * wj; #endif numerLowAccum[lmin] += WIJ; numerHighAccum[lmax] += WIJ; } } } } /* scan forward from i+1 until x[j]-x[i] > maxsearch or until we run out */ if(i < n1) { for(j=i+1; j < n; j++) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= max2search) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < max2search) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r such that dij <= r < bi */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* increment entries lmin to lmax inclusive */ if(lmax >= lmin) { #ifdef WEIGHTED wij = wi * wj; #endif numerLowAccum[lmin] += WIJ; numerHighAccum[lmax] += WIJ; } } } } } } /* Now use the accumulated values to compute the numerator and denominator. The value of denomAccum[l] should be added to denom[k] for all k <= l. numerHighAccum[l] should be added to numer[k] for all k <=l numerLowAccum[l] should then be subtracted from numer[k] for k <= l. */ for(l=nt1, naccum=daccum=ZERO; l>=0; l--) { daccum += denomAccum[l]; denom[l] = daccum; naccum += numerHighAccum[l]; numer[l] = naccum; naccum -= numerLowAccum[l]; } } #undef ZERO #undef WI #undef WJ #undef WIJ spatstat.explore/src/sphevol.c0000755000176200001440000001010714660756016016222 0ustar liggesusers#include #include #include "geom3.h" /* $Revision: 1.4 $ $Date: 2024/08/19 23:51:38 $ Routine for calculating ABSOLUTE volume of intersection between sphere and box Arbitrary positions: point is allowed to be inside or outside box. # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009, 2024 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ #ifdef DEBUG #define DBG(X,Y) Rprintf("%s: %f\n", (X), (Y)); #else #define DBG(X,Y) #endif #include "yesno.h" #define ABS(X) ((X >= 0.0) ? 
(X) : -(X)) static double rcubed, spherevol; double sphevol( Point *point, Box *box, double r ) { double sum, p[4], q[4]; int i, j; double v1(double a, int s, double r); double v2(double a, int sa, double b, int sb, double r); double v3(double a, int sa, double b, int sb, double c, int sc, double r); rcubed = r * r * r; spherevol = (4.0/3.0) * M_PI * rcubed; p[1] = box->x0 - point->x; p[2] = box->y0 - point->y; p[3] = box->z0 - point->z; q[1] = box->x1 - point->x; q[2] = box->y1 - point->y; q[3] = box->z1 - point->z; sum = 0; for(i = 1; i <= 3; i++) { sum += v1(p[i], -1, r) + v1(q[i], 1, r); #ifdef DEBUG Rprintf("i = %d, v1 = %f, v1 = %f\n", i, v1(p[i], -1, r), v1(q[i], 1, r)); #endif } DBG("Past v1", sum) for(i = 1; i < 3; i++) for(j = i+1; j <= 3; j++) { sum -= v2(p[i], -1, p[j], -1, r) + v2(p[i], -1, q[j], 1, r) + v2(q[i], 1, p[j], -1, r) + v2(q[i], 1, q[j], 1, r); #ifdef DEBUG Rprintf("i = %d, j = %d, sum = %f\n", i, j, sum); #endif } DBG("Past v2", sum) sum += v3(p[1], -1, p[2], -1, p[3], -1, r) + v3(p[1], -1, p[2], -1, q[3], 1, r); DBG("sum", sum) sum += v3(p[1], -1, q[2], 1, p[3], -1, r) + v3(p[1], -1, q[2], 1, q[3], 1, r); DBG("sum", sum) sum += v3(q[1], 1, p[2], -1, p[3], -1, r) + v3(q[1], 1, p[2], -1, q[3], 1, r); DBG("sum", sum) sum += v3(q[1], 1, q[2], 1, p[3], -1, r) + v3(q[1], 1, q[2], 1, q[3], 1, r); DBG("Past v3", sum) DBG("sphere volume", spherevol) return(spherevol - sum); } double v1(double a, int s, double r) { double value; short sign; double u(double a, double b, double c); value = 4.0 * rcubed * u(ABS(a)/r, 0.0, 0.0); sign = (a >= 0.0) ? 1 : -1; if(sign == s) return(value); else return(spherevol - value); } double v2(double a, int sa, double b, int sb, double r) { short sign; double u(double a, double b, double c); sign = (b >= 0.0) ? 1 : -1; if(sign != sb ) return(v1(a, sa, r) - v2(a, sa, ABS(b), 1, r)); b = ABS(b); sb = 1; sign = (a >= 0.0) ? 1 : -1; if(sign != sa) return(v1(b, sb, r) - v2(ABS(a), 1, b, sb, r)); a = ABS(a); return(2.0 * rcubed * u(a/r, b/r, 0.0)); } double v3(double a, int sa, double b, int sb, double c, int sc, double r) { short sign; double u(double a, double b, double c); sign = (c >= 0.0) ? 1 : -1; if(sign != sc) return(v2(a,sa,b,sb,r) - v3(a,sa,b,sb, ABS(c), 1, r)); c = ABS(c); sc = 1; sign = (b >= 0.0) ? 1 : -1; if(sign != sb) return(v2(a,sa,c,sc,r) - v3(a,sa,ABS(b),1,c,sc,r)); b = ABS(b); sb = 1; sign = (a >= 0.0) ? 1 : -1; if(sign != sa) return(v2(b,sb, c, sc, r) - v3(ABS(a),1, b, sb, c, sc, r)); a = ABS(a); return(rcubed * u(a/r, b/r, c/r)); } double u(double a, double b, double c) { double w(double x, double y); if(a * a + b * b + c * c >= 1.0) return(0.0); return( (M_PI/12.0) * (2.0 - 3.0 * (a + b + c) + (a * a * a + b * b * b + c * c * c)) + w(a,b) + w(b,c) + w(a,c) - a * b * c ); } double w(double x, double y) /* Arguments assumed >= 0 */ { double z; z = sqrt(1 - x * x - y * y); return( (x / 2.0 - x * x * x / 6.0) * atan2(y, z) + (y / 2.0 - y * y * y / 6.0) * atan2(x, z) - ( atan2(x * y , z) - x * y * z )/3.0 ); } spatstat.explore/src/Kborder.c0000755000176200001440000000172214611073311016117 0ustar liggesusers#include #include #include /* Kborder.c Efficient computation of border-corrected estimates of K for large datasets KborderI() Estimates K function, returns integer numerator & denominator KborderD() Estimates K function, returns double precision numerator & denominator Kwborder() Estimates Kinhom. 
Functions require (x,y) data to be sorted in ascending order of x and expect r values to be equally spaced and starting at zero $Revision: 1.5 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef WEIGHTED #define FNAME KborderI #define OUTTYPE int #include "Kborder.h" #undef FNAME #undef OUTTYPE #define FNAME KborderD #define OUTTYPE double #include "Kborder.h" #undef FNAME #undef OUTTYPE #define FNAME Kwborder #define WEIGHTED #define OUTTYPE double #include "Kborder.h" spatstat.explore/src/localpcf.c0000755000176200001440000000064314611073311016313 0ustar liggesusers#include #include #include #include "chunkloop.h" /* localpcf.c $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Assumes point patterns are sorted in increasing order of x coordinate */ #undef WEIGHTED #include "localpcf.h" #define WEIGHTED 1 #include "localpcf.h" spatstat.explore/src/init.c0000644000176200001440000000572414733462622015511 0ustar liggesusers /* Native symbol registration table for spatstat.explore package Automatically generated - do not edit this file! */ #include "proto.h" #include // for NULL #include /* See proto.h for declarations for the native routines registered below. */ static const R_CMethodDef CEntries[] = { {"acrdenspt", (DL_FUNC) &acrdenspt, 11}, {"acrsmoopt", (DL_FUNC) &acrsmoopt, 10}, {"adenspt", (DL_FUNC) &adenspt, 8}, {"asmoopt", (DL_FUNC) &asmoopt, 8}, {"awtcrdenspt", (DL_FUNC) &awtcrdenspt, 12}, {"awtcrsmoopt", (DL_FUNC) &awtcrsmoopt, 11}, {"awtdenspt", (DL_FUNC) &awtdenspt, 9}, {"awtsmoopt", (DL_FUNC) &awtsmoopt, 9}, {"Cidw", (DL_FUNC) &Cidw, 14}, {"Cidw2", (DL_FUNC) &Cidw2, 16}, {"crdenspt", (DL_FUNC) &crdenspt, 10}, {"crsmoopt", (DL_FUNC) &crsmoopt, 10}, {"denspt", (DL_FUNC) &denspt, 7}, {"digberJ", (DL_FUNC) &digberJ, 6}, {"Gdenspt", (DL_FUNC) &Gdenspt, 5}, {"Gsmoopt", (DL_FUNC) &Gsmoopt, 7}, {"Gwtdenspt", (DL_FUNC) &Gwtdenspt, 6}, {"Gwtsmoopt", (DL_FUNC) &Gwtsmoopt, 8}, {"idwloo", (DL_FUNC) &idwloo, 8}, {"idwloo2", (DL_FUNC) &idwloo2, 10}, {"KborderD", (DL_FUNC) &KborderD, 8}, {"KborderI", (DL_FUNC) &KborderI, 8}, {"KnoneD", (DL_FUNC) &KnoneD, 6}, {"KnoneI", (DL_FUNC) &KnoneI, 6}, {"KrectDbl", (DL_FUNC) &KrectDbl, 17}, {"KrectInt", (DL_FUNC) &KrectInt, 17}, {"KrectWtd", (DL_FUNC) &KrectWtd, 18}, {"Kwborder", (DL_FUNC) &Kwborder, 9}, {"Kwnone", (DL_FUNC) &Kwnone, 7}, {"locpcfx", (DL_FUNC) &locpcfx, 12}, {"locprod", (DL_FUNC) &locprod, 7}, {"locWpcfx", (DL_FUNC) &locWpcfx, 13}, {"locxprod", (DL_FUNC) &locxprod, 10}, {"RcallF3", (DL_FUNC) &RcallF3, 17}, {"RcallF3cen", (DL_FUNC) &RcallF3cen, 20}, {"RcallG3", (DL_FUNC) &RcallG3, 17}, {"RcallG3cen", (DL_FUNC) &RcallG3cen, 19}, {"RcallK3", (DL_FUNC) &RcallK3, 17}, {"Rcallpcf3", (DL_FUNC) &Rcallpcf3, 18}, {"ripboxDebug", (DL_FUNC) &ripboxDebug, 11}, {"ripleybox", (DL_FUNC) &ripleybox, 11}, {"ripleypoly", (DL_FUNC) &ripleypoly, 12}, {"rippolDebug", (DL_FUNC) &rippolDebug, 12}, {"scantrans", (DL_FUNC) &scantrans, 11}, {"segdens", (DL_FUNC) &segdens, 10}, {"segwdens", (DL_FUNC) &segwdens, 11}, {"smoopt", (DL_FUNC) &smoopt, 8}, {"wtcrdenspt", (DL_FUNC) &wtcrdenspt, 11}, {"wtcrsmoopt", (DL_FUNC) &wtcrsmoopt, 11}, {"wtdenspt", (DL_FUNC) &wtdenspt, 8}, {"wtsmoopt", (DL_FUNC) &wtsmoopt, 9}, {NULL, NULL, 0} }; void R_init_spatstat_explore(DllInfo *dll) { R_registerRoutines(dll, CEntries, NULL, NULL, NULL); R_useDynamicSymbols(dll, FALSE); R_forceSymbols(dll, TRUE); } 
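/*
   Notes on the registration code above (a general sketch of the R native
   routine interface, not package-specific behaviour beyond what the code
   itself states):

   - R_registerRoutines() makes the .C entry points listed in CEntries,
     with the stated argument counts, known to the R interpreter.
   - R_useDynamicSymbols(dll, FALSE) disables lookup of entry points that
     are not listed in the registration table.
   - R_forceSymbols(dll, TRUE) additionally requires .C() calls from the
     package's R code to pass the registered native symbol objects rather
     than character strings naming the routines.
*/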
spatstat.explore/src/yesno.h0000755000176200001440000000011614611073311015665 0ustar liggesusers/* yesno.h */ #ifndef YES #define YES (0 == 0) #define NO (!YES) #endif spatstat.explore/src/ripleypoly.h0000755000176200001440000002443314611073311016750 0ustar liggesusers/* ripleypoly.h Ripley's edge correction for polygonal windows This file is #included multiple times in corrections.c Macros used: RIPLEYFUN Name of C function DEBUGPOLY #defined if debugging information should be printed. TESTINSIDE defined in corrections.c *CHUNKLOOP defined in chunkloop.h TWOPI defined in Rmath.h $Revision: 1.22 $ $Date: 2022/10/20 10:57:43 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2019 Licence: GNU Public Licence >= 2 */ #undef DEBUGLEVEL #ifndef DEBUGPOLY #define DEBUGLEVEL 0 #else #define DEBUGLEVEL 3 #endif /* SPLITPOINT is used only when DEBUGLEVEL = 2 */ #undef SPLITPOINT #define SPLITPOINT 0 #undef ROUNDED #ifdef _WIN32 /* Avoid quirks of Windows i386 */ #define ROUNDED(X) ((float)(X)) #else #define ROUNDED(X) ((float)(X)) /* WAS: define ROUNDED(X) ((double)(X)) */ #endif void RIPLEYFUN( /* inputs */ int *nc, /* number of centre points */ double *xc, /* coordinates of centre points */ double *yc, double *bd, /* distances to boundary from centre points */ int *nr, double *rmat, /* matrix of radii (nc by nr) */ int *nseg, /* number of polygon edges */ double *x0, /* coordinates of polygon vertices */ double *y0, double *x1, double *y1, /* output */ double *out ) { int n, m, i, j, k, l, nradperpt, ncut, nchanges, maxchunk; double xcentre, ycentre, xx0, yy0, xx1, yy1, xx01, yy01; double bdisti; double x, y, radius, radius2, dx0, dx1, dy0; double a, b, c, t, det, sqrtdet, tmp; double theta[6], delta[7], tmid[7]; double xtest, ytest, contrib, total; n = *nc; nradperpt = *nr; m = *nseg; #if (DEBUGLEVEL == 2) Rprintf("/// Debug level 2, split point %d ///\n", (int) SPLITPOINT); #elif (DEBUGLEVEL > 0) Rprintf("/// Debug level %d ///\n", (int) DEBUGLEVEL); #endif OUTERCHUNKLOOP(i, n, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 16384) { xcentre = xc[i]; ycentre = yc[i]; bdisti = bd[i]; #if (DEBUGLEVEL >= 3) Rprintf("------- centre[%d] = (%lf, %lf) ------\n", i, xcentre, ycentre); Rprintf(" boundary distance %lf \n", bdisti); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 1)) Rprintf("------- centre[%d] ------\n", i); #endif for(j = 0; j < nradperpt; j++) { radius = rmat[ j * n + i]; radius2 = (double) (radius * radius); #if (DEBUGLEVEL >= 3) Rprintf("radius[%d] = %lf\n", j, radius); #elif (DEBUGLEVEL >= 2) Rprintf("radius[%d]\n", j); #endif if(bdisti > radius || radius == 0.0) { /* no crossings */ total = TWOPI; #if (DEBUGLEVEL >= 2) Rprintf("no crossings; total = 2*pi\n"); #endif } else { /* run through all boundary segments */ total = 0.0; for(k=0; k < m; k++) { ncut = 0; xx0 = x0[k]; yy0 = y0[k]; xx1 = x1[k]; yy1 = y1[k]; #if (DEBUGLEVEL >= 3) Rprintf("... Edge[%d] = (%lf,%lf) to (%lf,%lf)\n", k, xx0, yy0, xx1, yy1); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 2)) Rprintf("... 
Edge[%d]\n", k); #endif /* intersection with left edge */ dx0 = xx0 - xcentre; det = (double) (radius2 - dx0 * dx0); #if (DEBUGLEVEL >= 3) Rprintf("Left: det = %lf\n", det); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 3)) Rprintf("Left:\n"); #endif if(ROUNDED(det) > ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 4))) Rprintf("\tdet > 0\n"); #endif sqrtdet = (double) sqrt(det); y = (double) (ycentre + sqrtdet); if(ROUNDED(y) < ROUNDED(yy0)) { theta[ncut] = (double) atan2(y - ycentre, dx0); #if (DEBUGLEVEL >= 3) Rprintf("\tcut left at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 5)) Rprintf("\tcut left (+)\n"); #endif ncut++; } y = (double) (ycentre - sqrtdet); if(ROUNDED(y) < ROUNDED(yy0)) { theta[ncut] = (double) atan2(y-ycentre, dx0); #if (DEBUGLEVEL >= 3) Rprintf("\tcut left at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 6)) Rprintf("\tcut left (-)\n"); #endif ncut++; } } else if(ROUNDED(det) == ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 7))) Rprintf("\tdet = 0\n"); #endif if(ROUNDED(ycentre) < ROUNDED(yy0)) { theta[ncut] = (double) atan2(0.0, dx0); #if (DEBUGLEVEL >= 3) Rprintf("\ttangent left at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 8)) Rprintf("\ttangent left\n"); #endif ncut++; } } /* intersection with right edge */ dx1 = xx1 - xcentre; det = (double) (radius2 - dx1 * dx1); #if (DEBUGLEVEL >= 3) Rprintf("Right: det = %lf\n", det); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 9)) Rprintf("Right:\n"); #endif if(ROUNDED(det) > ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 10))) Rprintf("\tdet > 0\n"); #endif sqrtdet = (double) sqrt(det); y = (double) (ycentre + sqrtdet); if(ROUNDED(y) < ROUNDED(yy1)) { theta[ncut] = (double) atan2(y - ycentre, dx1); #if (DEBUGLEVEL >= 3) Rprintf("\tcut right at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 11)) Rprintf("\tcut right (+)\n"); #endif ncut++; } y = (double) (ycentre - sqrtdet); if(ROUNDED(y) < ROUNDED(yy1)) { theta[ncut] = (double) atan2(y - ycentre, dx1); #if (DEBUGLEVEL >= 3) Rprintf("\tcut right at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 12)) Rprintf("\tcut right (-)\n"); #endif ncut++; } } else if(ROUNDED(det) == ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 13))) Rprintf("\tdet = 0\n"); #endif if(ycentre < yy1) { theta[ncut] = (double) atan2(0.0, dx1); #if (DEBUGLEVEL >= 3) Rprintf("\ttangent right at theta= %lf\n", theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 14)) Rprintf("\ttangent right\n"); #endif ncut++; } } /* intersection with top segment */ xx01 = xx1 - xx0; yy01 = yy1 - yy0; dy0 = yy0 - ycentre; a = xx01 * xx01 + yy01 * yy01; b = 2 * (xx01 * dx0 + yy01 * dy0); c = dx0 * dx0 + dy0 * dy0 - radius2; det = (double) (b * b - 4 * a * c); #if (DEBUGLEVEL >= 3) Rprintf("Top: det = %lf\n", det); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 15)) Rprintf("Top:\n"); #endif if(ROUNDED(det) > ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 16))) Rprintf("\tdet > 0\n"); #endif sqrtdet = (double) sqrt(det); t = (double) ((sqrtdet - b)/(2 * a)); if(ROUNDED(0.0) <= ROUNDED(t) && ROUNDED(t) <= ROUNDED(1.0)) { x = xx0 + t * xx01; y = yy0 + t * yy01; theta[ncut] = (double) atan2(y - ycentre, x - xcentre); #if (DEBUGLEVEL >= 3) Rprintf("\thits + segment: t = %lf, theta = %lf\n", t, theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT 
>= 17)) Rprintf("\thits + segment\n"); #endif ++ncut; } t = (double) ((-sqrtdet - b)/(2 * a)); if(ROUNDED(0.0) <= ROUNDED(t) && ROUNDED(t) <= ROUNDED(1.0)) { x = xx0 + t * xx01; y = yy0 + t * yy01; theta[ncut] = (double) atan2(y - ycentre, x - xcentre); #if (DEBUGLEVEL >= 3) Rprintf("\thits - segment: t = %lf, theta = %lf\n", t, theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 18)) Rprintf("\thits - segment\n"); #endif ++ncut; } } else if(ROUNDED(det) == ROUNDED(0.0)) { #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 19))) Rprintf("\tdet = 0\n"); #endif t = (double) (- b/(2 * a)); if(ROUNDED(0.0) <= ROUNDED(t) && ROUNDED(t) <= ROUNDED(1.0)) { x = xx0 + t * xx01; y = yy0 + t * yy01; theta[ncut] = (double) atan2(y - ycentre, x - xcentre); #if (DEBUGLEVEL >= 3) Rprintf("\ttangent to segment: t = %lf, theta = %lf\n", t, theta[ncut]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 20)) Rprintf("\ttangent to segment\n"); #endif ++ncut; } } #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 21))) Rprintf("Finished cutting; ncut = %d\n", ncut); #endif /* for safety, force all angles to be in range [0, 2 * pi] */ if(ncut > 0) for(l = 0; l < ncut; l++) if(theta[l] < 0) theta[l] += TWOPI; /* sort angles */ if(ncut > 1) { do { nchanges = 0; for(l = 0; l < ncut - 1; l++) { if(theta[l] > theta[l+1]) { /* swap */ ++nchanges; tmp = theta[l]; theta[l] = theta[l+1]; theta[l+1] = tmp; } } } while(nchanges > 0); } #if (DEBUGLEVEL >= 3) if(ncut > 0) { for(l = 0; l < ncut; l++) Rprintf("theta[%d] = %lf\n", l, theta[l]); } #endif /* compute length of circumference inside polygon */ if(ncut == 0) { /* entire circle is either in or out */ xtest = xcentre + radius; ytest = ycentre; if(TESTINSIDE(xtest, ytest, xx0, yy0, xx1, yy1)) contrib = TWOPI; else contrib = 0.0; } else { /* find midpoints and lengths of pieces (adding theta = ) */ delta[0] = theta[0]; tmid[0] = theta[0]/2; if(ncut > 1) { for(l = 1; l < ncut; l++) { delta[l] = theta[l] - theta[l-1]; tmid[l] = (theta[l] + theta[l-1])/2; } } delta[ncut] = TWOPI - theta[ncut - 1]; tmid[ncut] = (TWOPI + theta[ncut-1])/2; contrib = 0.0; for(l = 0; l <= ncut; l++) { #if (DEBUGLEVEL >= 3) Rprintf("Interval %d, width %lf:", l, delta[l]); #elif ((DEBUGLEVEL == 2) && (SPLITPOINT >= 22)) Rprintf("Interval %d:", l); #endif xtest = (double) (xcentre + radius * cos(tmid[l])); ytest = (double) (ycentre + radius * sin(tmid[l])); if(TESTINSIDE(xtest, ytest, xx0, yy0, xx1, yy1)) { contrib += delta[l]; #if ((DEBUGLEVEL >= 3) || ((DEBUGLEVEL == 2) && (SPLITPOINT >= 23))) Rprintf("inside\n"); } else { Rprintf("outside\n"); #endif } } } /* multiply by sign of trapezium */ if(xx0 < xx1) contrib = -contrib; #if (DEBUGLEVEL >= 3) Rprintf("contrib = %lf\n", contrib); #endif total += contrib; } } out[ j * n + i] = total; #if (DEBUGLEVEL >= 1) Rprintf("\nTotal = %lf = %lf * (2 * pi)\n", total, total/TWOPI); #endif } } } } spatstat.explore/src/densptcross.c0000755000176200001440000002330114611073311017073 0ustar liggesusers#include #include #include "chunkloop.h" #include "crossloop.h" #include "constants.h" /* densptcross.c $Revision: 1.6 $ $Date: 2023/04/02 00:18:44 $ Assumes point patterns are sorted in increasing order of x coordinate *crdenspt Density estimate at points *crsmoopt Smoothed mark values at points Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define TWOPI M_2PI double sqrt(double x); double exp(double x); #define STD_DECLARATIONS \ int i, j, n1, n2, maxchunk, jleft; \ double x1i, y1i, 
xleft, dx, dy, d2, rmax, r2max; \ double *x1, *y1, *x2, *y2; #define STD_INITIALISE \ n1 = *nquery; \ x1 = xq; y1 = yq; \ n2 = *ndata; \ x2 = xd; y2 = yd; \ rmax = *rmaxi; \ r2max = rmax * rmax /* ----------------- density estimation -------------------- */ void crdenspt( /* inputs */ int *nquery, /* number of locations to be interrogated */ double *xq, double *yq, /* (x,y) coordinates to be interrogated */ int *ndata, /* number of data points */ double *xd, double *yd, /* (x,y) coordinates of data */ double *rmaxi, /* maximum distance at which points contribute */ double *sig, /* Gaussian sd */ int *squared, /* whether to use the squared kernel */ /* output */ double *result /* vector of computed density values */ ) { STD_DECLARATIONS; double resulti, coef; double sigma, a; STD_INITIALISE; if(n1 == 0 || n2 == 0) return; sigma = *sig; a = 1.0/(2.0 * sigma * sigma); coef = 1.0/(TWOPI * sigma * sigma); if(*squared) { coef *= coef; a *= 2.0; } CROSSLOOP( { resulti = 0.0; }, { resulti += exp(-d2 * a); } , { result[i] = coef * resulti; }) } void wtcrdenspt( /* inputs */ int *nquery, /* number of locations to be interrogated */ double *xq, double *yq, /* (x,y) coordinates to be interrogated */ int *ndata, /* number of data points */ double *xd, double *yd, /* (x,y) coordinates of data */ double *wd, /* weights of data points */ double *rmaxi, /* maximum distance at which points contribute */ double *sig, /* Gaussian sd */ int *squared, /* whether to use the squared kernel */ /* output */ double *result /* vector of computed density values */ ) { STD_DECLARATIONS; double resulti, coef; double sigma, a; STD_INITIALISE; if(n1 == 0 || n2 == 0) return; sigma = *sig; a = 1.0/(2.0 * sigma * sigma); coef = 1.0/(TWOPI * sigma * sigma); if(*squared) { coef *= coef; a *= 2.0; } CROSSLOOP( { resulti = 0.0; }, { resulti += wd[j] * exp(-d2 * a); }, { result[i] = coef * resulti; } ) } /* ------------- anisotropic versions -------------------- */ void acrdenspt( /* inputs */ int *nquery, /* number of locations to be interrogated */ double *xq, double *yq, /* (x,y) coordinates to be interrogated */ int *ndata, /* number of data points */ double *xd, double *yd, /* (x,y) coordinates of data */ double *rmaxi, /* maximum distance at which points contribute */ double *detsigma, /* determinant of variance matrix */ double *sinv, /* inverse variance matrix (2x2, flattened) */ int *squared, /* whether to use the squared kernel */ /* output */ double *result /* vector of computed density values */ ) { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; if(n1 == 0 || n2 == 0) return; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); if(*squared) { coef *= coef; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; } else { s11 = sinv[0]/2.0; s12 = sinv[1]/2.0; s21 = sinv[2]/2.0; s22 = sinv[3]/2.0; } CROSSLOOP( { resulti = 0.0; }, { resulti += exp(- dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22)); }, { result[i] = coef * resulti; }) } void awtcrdenspt( /* inputs */ int *nquery, /* number of locations to be interrogated */ double *xq, double *yq, /* (x,y) coordinates to be interrogated */ int *ndata, /* number of data points */ double *xd, double *yd, /* (x,y) coordinates of data */ double *wd, /* weights of data points */ double *rmaxi, /* maximum distance at which points contribute */ double *detsigma, /* determinant of variance matrix */ double *sinv, /* inverse variance matrix (2x2, flattened) */ int *squared, /* whether to use the squared kernel */ /* output 
*/ double *result /* vector of weighted density values */ ) { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; if(n1 == 0 || n2 == 0) return; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); if(*squared) { coef *= coef; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; } else { s11 = sinv[0]/2.0; s12 = sinv[1]/2.0; s21 = sinv[2]/2.0; s22 = sinv[3]/2.0; } CROSSLOOP( { resulti = 0.0; }, { resulti += wd[j] * \ exp(- dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22)); }, { result[i] = coef * resulti; }) } /* --------------- smoothing --------------------------- */ void crsmoopt( /* inputs */ int *nquery, /* number of locations to be interrogated */ double *xq, double *yq, /* (x,y) coordinates to be interrogated */ int *ndata, /* number of data points */ double *xd, double *yd, /* (x,y) coordinates of data */ double *vd, /* mark values at data points */ double *rmaxi, /* maximum distance at which points contribute */ double *sig, /* Gaussian sd */ /* output */ double *result /* vector of computed smoothed values */ ) { STD_DECLARATIONS; double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; if(n1 == 0 || n2 == 0) return; CROSSLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2/twosig2); \ denom += wij; \ numer += wij * vd[j]; \ }, { \ result[i] = numer/denom; \ }) } void wtcrsmoopt( /* inputs */ int *nquery, /* number of locations to be interrogated */ double *xq, double *yq, /* (x,y) coordinates to be interrogated */ int *ndata, /* number of data points */ double *xd, double *yd, /* (x,y) coordinates of data */ double *vd, /* mark values at data points */ double *wd, /* weights of data points */ double *rmaxi, /* maximum distance */ double *sig, /* Gaussian sd */ /* output */ double *result /* vector of computed smoothed values */ ) { STD_DECLARATIONS; double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; if(n1 == 0 || n2 == 0) return; CROSSLOOP({ numer = denom = 0.0; }, { \ wij = wd[j] * exp(-d2/twosig2); \ denom += wij; \ numer += wij * vd[j]; \ }, { \ result[i] = numer/denom; \ }) } /* ------------- anisotropic versions -------------------- */ void acrsmoopt( /* inputs */ int *nquery, /* number of locations to be interrogated */ double *xq, double *yq, /* (x,y) coordinates to be interrogated */ int *ndata, /* number of data points */ double *xd, double *yd, /* (x,y) coordinates of data */ double *vd, /* mark values at data points */ double *rmaxi, /* maximum distance at which points contribute */ double *sinv, /* inverse variance matrix (2x2, flattened) */ /* output */ double *result /* vector of smoothed values */ ) { STD_DECLARATIONS; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n1 == 0 || n2 == 0) return; CROSSLOOP({ numer = denom = 0.0; }, { \ wij = exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * vd[j]; \ }, { \ result[i] = numer/denom; \ }) } void awtcrsmoopt( /* inputs */ int *nquery, /* number of locations to be interrogated */ double *xq, double *yq, /* (x,y) coordinates to be interrogated */ int *ndata, /* number of data points */ double *xd, double *yd, /* (x,y) coordinates of data */ double *vd, /* mark values at data points */ double *wd, /* weights of data points */ double *rmaxi, /* maximum distance at which points contribute */ double *sinv, /* inverse variance 
matrix (2x2, flattened) */ /* output */ double *result /* vector of smoothed values */ ) { STD_DECLARATIONS; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n1 == 0 || n2 == 0) return; CROSSLOOP({ numer = denom = 0.0; }, { \ wij = wd[j] * exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * vd[j]; \ }, { \ result[i] = numer/denom; \ }) } spatstat.explore/src/crossloop.h0000755000176200001440000000356214611073311016563 0ustar liggesusers/* crossloop.h Generic code template for loop for cross-close-pairs operations collecting contributions to point x_i from all points y_j such that ||x_i - y_j|| <= r cpp variables used: INITIAL_I code executed at start of 'i' loop CONTRIBUTE_IJ code executed to compute contribution from j to i COMMIT_I code executed to save total contribution to i C variables used: int i, j, n1, n2, maxchunk, jleft; double x1i, y1i, xleft, dx, dy, d2, rmax, r2max; double *x1, *y1, *x2, *y2; $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifndef CHUNKLOOP_H #include "chunkloop.h" #endif #define CROSSLOOP(INITIAL_I, CONTRIBUTE_IJ, COMMIT_I) \ OUTERCHUNKLOOP(i, n1, maxchunk, 65536) { \ R_CheckUserInterrupt(); \ INNERCHUNKLOOP(i, n1, maxchunk, 65536) { \ \ x1i = x1[i]; \ y1i = y1[i]; \ \ INITIAL_I; \ \ jleft = 0; \ \ /* \ adjust starting point jleft \ */ \ xleft = x1i - rmax; \ while((x2[jleft] < xleft) && (jleft+1 < n2)) \ ++jleft; \ \ /* \ process from j = jleft until dx > rmax \ */ \ for(j=jleft; j < n2; j++) { \ dx = x2[j] - x1i; \ if(dx > rmax) \ break; \ dy = y2[j] - y1i; \ d2 = dx * dx + dy * dy; \ if(d2 <= r2max) { \ /* add this (i, j) pair to output */ \ CONTRIBUTE_IJ; \ } \ } \ COMMIT_I; \ } \ } spatstat.explore/src/Knone.c0000755000176200001440000000172014611073311015577 0ustar liggesusers#include #include #include /* Knone.c Efficient computation of uncorrected estimates of K for large datasets KnoneI() Estimates K function, returns integer numerator KnoneD() Estimates K function, returns double precision numerator Kwnone() Estimates Kinhom, returns double precision numerator Functions require (x,y) data to be sorted in ascending order of x and expect r values to be equally spaced and starting at zero $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef WEIGHTED #define FNAME KnoneI #define OUTTYPE int #include "Knone.h" #undef FNAME #undef OUTTYPE #define FNAME KnoneD #define OUTTYPE double #include "Knone.h" #undef FNAME #undef OUTTYPE #define FNAME Kwnone #define WEIGHTED #define OUTTYPE double #include "Knone.h" spatstat.explore/src/idw.c0000755000176200001440000002162714611073311015320 0ustar liggesusers/* idw.c Inverse-distance weighted smoothing $Revision: 1.13 $ $Date: 2022/10/21 10:43:01 $ Cidw inverse distance smoothing from data points onto pixel grid idwloo leave-one-out estimate at data points Cidw2 Cidw with variance estimate idwloo2 idwloo with variance estimate Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #include #include #include "chunkloop.h" #define MAT(X,I,J,NROW) (X)[(J) + (NROW) * (I)] /* inverse-distance smoothing from data points onto pixel grid */ void Cidw( /* data points and values */ double *x, double *y, double *v, int *n, /* pixel grid */ double 
*xstart, double *xstep, int *nx, double *ystart, double *ystep, int *ny, /* exponent for IDW */ double *power, /* output arrays - assumed initialised 0 */ double *num, double *den, double *rat ) { int N, i, Nx, Ny, ix, iy; double xg, yg, x0, dx, y0, dy, pon2, d2, w, sumw, sumwv; N = *n; Nx = *nx; Ny = *ny; x0 = *xstart; y0 = *ystart; dx = *xstep; dy = *ystep; pon2 = (*power)/2.0; if(pon2 == 1.0) { /* slightly faster code when power=2 */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { sumwv = sumw = 0.0; /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/d2; sumwv += w * v[i]; sumw += w; } /* compute ratio */ MAT(num, ix, iy, Ny) = sumwv; MAT(den, ix, iy, Ny) = sumw; MAT(rat, ix, iy, Ny) = sumwv/sumw; } } } else { /* general case */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { sumwv = sumw = 0.0; /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/pow(d2, pon2); sumwv += w * v[i]; sumw += w; } /* compute ratio */ MAT(num, ix, iy, Ny) = sumwv; MAT(den, ix, iy, Ny) = sumw; MAT(rat, ix, iy, Ny) = sumwv/sumw; } } } } /* Leave-one-out IDW at data points only */ void idwloo( /* data points and values */ double *x, double *y, double *v, int *n, /* exponent for IDW */ double *power, /* output vectors - assumed initialised 0 */ double *num, double *den, double *rat ) { int N, i, j, maxchunk; double xi, yi, d2, w, pon2, sumw, sumwv; N = *n; pon2 = (*power)/2.0; if(pon2 == 1.0) { /* slightly faster code when power=2 */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; sumwv = sumw = 0.0; if(i > 0) { for(j = 0; j < i; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/d2; sumwv += w * v[j]; sumw += w; } } if(i < N-1) { for(j = i+1; j < N; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/d2; sumwv += w * v[j]; sumw += w; } } /* compute ratio */ num[i] = sumwv; den[i] = sumw; rat[i] = sumwv/sumw; } } } else { /* general case */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; sumwv = sumw = 0.0; if(i > 0) { for(j = 0; j < i; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); sumwv += w * v[j]; sumw += w; } } if(i < N-1) { for(j = i+1; j < N; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); sumwv += w * v[j]; sumw += w; } } /* compute ratio */ num[i] = sumwv; den[i] = sumw; rat[i] = sumwv/sumw; } } } } /* ---------------------------------------------------- VERSIONS WITH VARIANCE CALCULATION --------------------------------------------------- */ /* inverse-distance smoothing from data points onto pixel grid */ void Cidw2( /* data points and values */ double *x, double *y, double *v, int *n, /* pixel grid */ double *xstart, double *xstep, int *nx, double *ystart, double *ystep, int *ny, /* exponent for IDW */ double *power, /* output arrays - assumed initialised 0 */ double *num, double *den, double *rat, /* output arrays - assumed initialised 0 */ double *mtwo, double *wtwo ) { int N, i, Nx, Ny, ix, iy; double xg, yg, x0, dx, y0, dy, pon2, d2, w, 
vi, sumw, sumwv, sumw2, runmean, m2, delta, epsilon; N = *n; Nx = *nx; Ny = *ny; x0 = *xstart; y0 = *ystart; dx = *xstep; dy = *ystep; pon2 = (*power)/2.0; if(pon2 == 1.0) { /* slightly faster code when power=2 */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { sumwv = sumw = sumw2 = m2 = runmean = 0.0; /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { vi = v[i]; d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/d2; sumw += w; sumw2 += w * w; sumwv += w * vi; delta = vi - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } /* compute ratio */ MAT(num, ix, iy, Ny) = sumwv; MAT(den, ix, iy, Ny) = sumw; MAT(rat, ix, iy, Ny) = sumwv/sumw; MAT(mtwo, ix, iy, Ny) = m2; MAT(wtwo, ix, iy, Ny) = sumw2; } } } else { /* general case */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { sumwv = sumw = sumw2 = m2 = runmean = 0.0; /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { vi = v[i]; d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/pow(d2, pon2); sumw += w; sumw2 += w * w; sumwv += w * vi; delta = vi - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } /* compute ratio */ MAT(num, ix, iy, Ny) = sumwv; MAT(den, ix, iy, Ny) = sumw; MAT(rat, ix, iy, Ny) = sumwv/sumw; MAT(mtwo, ix, iy, Ny) = m2; MAT(wtwo, ix, iy, Ny) = sumw2; } } } } /* Leave-one-out IDW at data points only */ void idwloo2( /* data points and values */ double *x, double *y, double *v, int *n, /* exponent for IDW */ double *power, /* output vectors - initialised 0 */ double *num, double *den, double *rat, double *mtwo, double *wtwo ) { int N, i, j, maxchunk; double xi, yi, d2, w, pon2, vj, sumw, sumwv, sumw2, runmean, m2, delta, epsilon; N = *n; pon2 = (*power)/2.0; if(pon2 == 1.0) { /* slightly faster code when power=2 */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; sumwv = sumw = sumw2 = m2 = runmean = 0.0; if(i > 0) { for(j = 0; j < i; j++) { vj = v[j]; d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/d2; sumwv += w * vj; sumw += w; sumw2 += w * w; delta = vj - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } } if(i < N-1) { for(j = i+1; j < N; j++) { vj = v[j]; d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/d2; sumwv += w * vj; sumw += w; sumw2 += w * w; delta = vj - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } } /* compute ratio */ num[i] = sumwv; den[i] = sumw; rat[i] = sumwv/sumw; mtwo[i] = m2; wtwo[i] = sumw2; } } } else { /* general case */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; sumwv = sumw = sumw2 = m2 = runmean = 0.0; if(i > 0) { for(j = 0; j < i; j++) { vj = v[j]; d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); sumwv += w * vj; sumw += w; sumw2 += w * w; delta = vj - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } } if(i < N-1) { for(j = i+1; j < N; j++) { vj = v[j]; d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); sumwv += w * vj; sumw += w; 
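 /* Descriptive note on the statements that follow: this is a one-pass
    (Welford/West-style) update of a weighted running mean and a weighted
    sum of squared deviations.  With sumw already including the new
    weight w, the mean shifts by epsilon = w * delta / sumw and the
    second moment accumulates (sumw - w) * delta * epsilon.  The same
    update appears in the other branches of idwloo2 and in Cidw2 above;
    the accumulated m2 and sumw2 are the ingredients for the variance
    calculation announced in the section header. */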
sumw2 += w * w; delta = vj - runmean; epsilon = delta * w / sumw; runmean += epsilon; m2 += (sumw - w) * delta * epsilon; } } /* compute ratio */ num[i] = sumwv; den[i] = sumw; rat[i] = sumwv/sumw; mtwo[i] = m2; wtwo[i] = sumw2; } } } } spatstat.explore/src/KrectBody.h0000755000176200001440000001042514611073311016422 0ustar liggesusers /* KrectBody.h +++ Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2014 ++++ Main function body for 'Krect' Included multiple times with different values of the macros: (#define or #undef) WEIGHTED ISOTROPIC TRANSLATION BORDER UNCORRECTED **Assumes point pattern is sorted in increasing order of x coordinate** **Assumes window is (0,wide) x (0, high) ** **Assumes output vectors were initialised to zero** Variables are declared in 'KrectFunDec.c' This algorithm is optimal (amongst the choices in spatstat) when the window is a rectangle *and* at least one of the ISOTROPIC, TRANSLATION corrections is needed. There are faster algorithms for the border correction on its own. $Revision: 1.3 $ $Date: 2014/02/09 03:01:27 $ */ /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < N) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > N) maxchunk = N; /* ............. LOOP OVER i ................. */ for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; #ifdef WEIGHTED wi = w[i]; #endif #ifdef BORDER /* For border correction */ /* compute distance to border */ bx = MIN(xi, (wide - xi)); by = MIN(yi, (high - yi)); bdisti = MIN(bx, by); /* denominator will ultimately be incremented for all r < b[i] */ bratio = bdisti/rstep; /* lbord is the largest integer STRICTLY less than bratio */ lbord = (int) ceil(bratio) - 1; lbord = (lbord <= Nr1) ? lbord : Nr1; /* increment entry corresponding to r = b[i] */ #ifdef WEIGHTED if(lbord >= 0) denomAccum[lbord] += wi; #else if(lbord >= 0) (denomAccum[lbord])++; #endif #endif #ifdef ISOTROPIC /* For isotropic correction */ /* perpendicular distance from point i to each edge of rectangle L = left, R = right, D = down, U = up */ dL = xi; dR = wide - xi; dD = yi; dU = high - yi; /* test for corner of the rectangle */ ncor = SMALL(dL) + SMALL(dR) + SMALL(dD) + SMALL(dU); corner = (ncor >= 2); /* angle between - perpendicular to edge of rectangle and - line from point to corner of rectangle */ bLU = atan2(dU, dL); bLD = atan2(dD, dL); bRU = atan2(dU, dR); bRD = atan2(dD, dR); bUL = atan2(dL, dU); bUR = atan2(dR, dU); bDL = atan2(dL, dD); bDR = atan2(dR, dD); #endif /* ............. LOOP OVER j ................. */ /* scan through points (x[j],y[j]) */ /* scan backward from i-1 until |x[j]-x[i]| > Rmax */ if(i > 0) { for(j=i-1; j >= 0; j--) { /* squared interpoint distance */ dx = xi - x[j]; dx2 = dx * dx; if(dx2 >= R2max) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < R2max) { #include "KrectIncrem.h" } } } /* scan forward from i+1 until x[j]-x[i] > Rmax */ if(i < N1) { for(j=i+1; j < N; j++) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= R2max) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < R2max) { #include "KrectIncrem.h" } } } } } /* .................. END OF LOOPS ................................ */ /* ............. compute cumulative functions ..................... 
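Up to this point each output vector holds only the per-bin increments
contributed by individual (i, j) pairs.  The loops below convert these
into cumulative sums over r, so that entry l collects every pair whose
distance falls at or below the l-th tabulated distance.  The border
correction is handled separately by a downward scan, because (as noted
below) denomAccum[l] and the numerator accumulators have to be added to
every bin k <= l.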
*/ #ifdef UNCORRECTED naccum = ZERO; for(l = 0; l < Nr; l++) { unco[l] += naccum; naccum = unco[l]; } #endif #ifdef ISOTROPIC accum = 0.0; for(l = 0; l < Nr; l++) { iso[l] += accum; accum = iso[l]; } #endif #ifdef TRANSLATION accum = 0.0; for(l = 0; l < Nr; l++) { trans[l] += accum; accum = trans[l]; } #endif #ifdef BORDER /* Now use the accumulated values to compute the numerator and denominator. The value of denomAccum[l] should be added to denom[k] for all k <= l. numerHighAccum[l] should be added to numer[k] for all k <=l numerLowAccum[l] should then be subtracted from numer[k] for k <= l. */ for(l=Nr1, naccum=daccum=ZERO; l>=0; l--) { daccum += denomAccum[l]; bdenom[l] = daccum; naccum += numerHighAccum[l]; bnumer[l] = naccum; naccum -= numerLowAccum[l]; } #endif spatstat.explore/src/functable.h0000755000176200001440000000310214611073311016471 0ustar liggesusers/* $Revision: 1.1 $ $Date: 2009/11/04 23:54:15 $ Definitions of C structures for spatial statistics function estimates. Usually the estimates are of the form f^(x) = a^(x)/b^(x); we store f^ and also a^ and b^ to cater for applications with replicated data. # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ typedef struct Ftable { /* double precision function table */ double t0; double t1; int n; /* number of entries */ double *f; double *num; /* f[i] = num[i]/denom[i] */ double *denom; } Ftable; typedef struct Itable { /* integer count table e.g for histograms */ double t0; double t1; int n; int *num; int *denom; /* usually p[i] = num[i]/denom[i] */ } Itable; typedef struct H4table { /* Four histograms, for censored data */ double t0; double t1; int n; int *obs; /* observed lifetimes: o_i = min(t_i, c_i) */ int *nco; /* uncensored lifetimes: o_i for which t_i <= c_i */ int *cen; /* censoring times: c_i */ int *ncc; /* censor times of uncensored data: c_i for which t_i <= c_i */ int upperobs; /* number of o_i that exceed t1 */ int uppercen; /* number of c_i that exceed t1 */ } H4table; spatstat.explore/src/constants.h0000755000176200001440000000074714611073311016556 0ustar liggesusers/* constants.h Ensure that required constants are defined (Insurance against flaky installations) $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #ifndef M_PI #define M_PI 3.141592653589793 #endif #ifndef M_PI_2 #define M_PI_2 1.570796326794897 #endif #ifndef M_2_PI #define M_2_PI (2.0/M_PI) #endif #ifndef M_2PI #define M_2PI 6.283185307179586 #endif spatstat.explore/src/pcf3.c0000755000176200001440000001227014611073311015362 0ustar liggesusers#include #include #include #include #include "geom3.h" #include "functable.h" #include "chunkloop.h" #include "constants.h" /* $Revision: 1.10 $ $Date: 2022/11/02 10:17:55 $ pair correlation function of 3D point pattern (Epanechnikov kernel) pcf3trans translation correction pcf3isot isotropic correction Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2022 Licence: GNU Public Licence >= 2 */ #define FOURPI (2.0 * M_2PI) void pcf3trans( Point *p, int n, Box *b, Ftable *pcf, double 
delta ) { register int i, j, l, lmin, lmax, maxchunk; register double dx, dy, dz, dist; register double vx, vy, vz, tval; Point *ip, *jp; double dt, vol, lambda, denom; double coef, twocoef, frac, invweight, kernel; double sphesfrac(Point *point, Box *box, double r); double sphevol(Point *point, Box *box, double r); /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); lambda = ((double) n )/ vol; denom = lambda * lambda; for(l = 0; l < pcf->n; l++) { (pcf->denom)[l] = denom; (pcf->num)[l] = 0.0; } /* spacing of argument in result vector */ dt = (pcf->t1 - pcf->t0)/(pcf->n - 1); /* compute numerator */ OUTERCHUNKLOOP(i, n, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 8196) { ip = p + i; for(j = i + 1; j < n; j++) { /* compute pairwise distance */ jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( ((dist - delta) - pcf->t0) / dt ); lmax = floor( ((dist + delta) - pcf->t0) / dt ); if(lmax >= 0 && lmin < pcf->n) { /* kernel centred at 'dist' has nonempty intersection with specified range of t values */ /* compute intersection */ if(lmin < 0) lmin = 0; if(lmax >= pcf->n) lmax = pcf->n - 1; /* compute (inverse) edge correction weight */ vx = b->x1 - b->x0 - (dx > 0 ? dx : -dx); vy = b->y1 - b->y0 - (dy > 0 ? dy : -dy); vz = b->z1 - b->z0 - (dz > 0 ? dz : -dz); invweight = vx * vy * vz * FOURPI * dist * dist; if(invweight > 0.0) { for(l = lmin; l < pcf->n; l++) { tval = pcf->t0 + l * dt; /* unnormalised Epanechnikov kernel with halfwidth delta */ frac = (dist - tval)/delta; kernel = (1 - frac * frac); if(kernel > 0) (pcf->num)[l] += kernel / invweight; } } } } } } /* constant factor in kernel */ coef = 3.0/(4.0 * delta); /* multiplied by 2 because we only visited i < j pairs */ twocoef = 2.0 * coef; /* normalise kernel and compute ratio estimate */ for(l = 0; l < pcf->n; l++) { (pcf->num)[l] *= twocoef; (pcf->f)[l] = ((pcf->denom)[l] > 0.0) ? 
(pcf->num)[l] / (pcf->denom)[l] : 0.0; } } void pcf3isot( Point *p, int n, Box *b, Ftable *pcf, double delta ) { register int i, j, l, lmin, lmax, maxchunk; register double dx, dy, dz, dist; Point *ip, *jp; double dt, vol, denom, mass, tval; double coef, frac, kernel; double sphesfrac(Point *point, Box *box, double r); double sphevol(Point *point, Box *box, double r); Point vertex; Box half; /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); denom = ((double) (n * n))/vol; for(l = 0; l < pcf->n; l++) { (pcf->denom)[l] = denom; (pcf->num)[l] = 0.0; } /* spacing of argument in result vector */ dt = (pcf->t1 - pcf->t0)/(pcf->n - 1); /* set up for volume correction */ vertex.x = b->x0; vertex.y = b->y0; vertex.z = b->z0; half.x1 = b->x1; half.y1 = b->y1; half.z1 = b->z1; half.x0 = (b->x0 + b->x1)/2.0; half.y0 = (b->y0 + b->y1)/2.0; half.z0 = (b->z0 + b->z1)/2.0; /* compute numerator */ OUTERCHUNKLOOP(i, n, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 8196) { ip = p + i; for(j = i + 1; j < n; j++) { jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( ((dist - delta) - pcf->t0) / dt ); lmax = floor( ((dist + delta) - pcf->t0) / dt ); if(lmax >= 0 && lmin < pcf->n) { /* kernel centred at 'dist' has nonempty intersection with specified range of t values */ /* compute intersection */ if(lmin < 0) lmin = 0; if(lmax >= pcf->n) lmax = pcf->n - 1; /* compute edge correction weight */ mass = (1.0 / sphesfrac(ip, b, dist)) + (1.0 / sphesfrac(jp, b, dist)); mass *= 1.0 - 8.0 * sphevol(&vertex, &half, dist) / vol; if(mass > 0.0) { mass /= FOURPI * dist * dist; for(l = lmin; l < pcf->n; l++) { tval = pcf->t0 + l * dt; /* unnormalised Epanechnikov kernel with halfwidth delta */ frac = (dist - tval)/delta; kernel = (1 - frac * frac); if(kernel > 0) (pcf->num)[l] += kernel * mass; } } } } } } /* constant factor in kernel */ coef = 3.0/(4.0 * delta); /* normalise kernel and compute ratio estimate */ for(l = 0; l < pcf->n; l++) { (pcf->num)[l] *= coef; (pcf->f)[l] = ((pcf->denom)[l] > 0.0)? 
(pcf->num)[l] / (pcf->denom)[l] : 0.0; } } spatstat.explore/src/Knone.h0000755000176200001440000000560014611073311015605 0ustar liggesusers/* Knone.h Code template for K function estimators in Knone.c Variables: FNAME function name OUTTYPE storage type of the output 'numer' ('int' or 'double') WEIGHTED #defined for weighted (inhom) K function Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.7 $ $Date: 2022/10/21 10:43:01 $ */ void FNAME( /* inputs */ int *nxy, double *x, double *y, #ifdef WEIGHTED double *w, #endif int *nr, double *rmax, /* output */ OUTTYPE *numer ) { int i, j, l, n, nt, n1, lmin, lmax, maxchunk; double dt, tmax, tmax2, xi, yi; double dratio, dij, dij2, dx, dy, dx2; #ifdef WEIGHTED double wi, wj, wij; #endif #ifdef WEIGHTED #define ZERO 0.0 #define WI wi #define WJ wj #define WIJ wij #else #define ZERO 0 #define WI 1 #define WJ 1 #define WIJ 1 #endif n = *nxy; nt = *nr; n1 = n - 1; lmax = nt - 1; dt = (*rmax)/(nt-1); tmax = *rmax; tmax2 = tmax * tmax; /* initialise */ for(l = 0; l < nt; l++) numer[l] = ZERO; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { #ifdef WEIGHTED wi = w[i]; #endif xi = x[i]; yi = y[i]; /* scan backward from i-1 until x[j] < x[i] -tmax or until we run out */ if(i > 0) { for(j=i-1; j >= 0; j--) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= tmax2) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < tmax2) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r >= dij */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* effectively increment entries lmin to lmax inclusive */ if(lmin <= lmax) { #ifdef WEIGHTED wij = wi * wj; #endif numer[lmin] += WIJ; } } } } /* scan forward from i+1 until x[j] > x[i] + tmax or until we run out */ if(i < n1) { for(j=i+1; j < n; j++) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= tmax2) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < tmax2) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r >= dij */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* increment entries lmin to lmax inclusive */ if(lmin <= lmax) { #ifdef WEIGHTED wij = wi * wj; #endif numer[lmin] += WIJ; } } } } } } /* Now accumulate the numerator. 
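Each close pair was counted only once above, in the entry lmin that
corresponds to the smallest tabulated distance at or above d(i,j).
The cumulative sum below spreads that contribution to all larger
distances, so numer[l] ends up as the (weighted, in the Kwnone case)
count of ordered close pairs lying within the l-th distance value,
which is the numerator of the uncorrected K estimate described in the
header of Knone.c.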
*/ if(nt > 1) for(l=1; l < nt; l++) numer[l] += numer[l-1]; } #undef ZERO #undef WI #undef WJ #undef WIJ spatstat.explore/src/denspt.c0000755000176200001440000003272014611073311016026 0ustar liggesusers#include #include #include "chunkloop.h" #include "pairloop.h" #include "constants.h" /* denspt.c Calculation of density estimate at data points $Revision: 1.26 $ $Date: 2023/04/02 00:17:21 $ Assumes point pattern is sorted in increasing order of x coordinate *denspt* Density estimate at points *smoopt* Smoothed mark values at points Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define TWOPI M_2PI double sqrt(double x); double exp(double x); #define STD_DECLARATIONS \ int n, i, j, maxchunk; \ double xi, yi, rmax, r2max, dx, dy, dx2, d2 #define STD_INITIALISE \ n = *nxy; \ rmax = *rmaxi; \ r2max = rmax * rmax /* ----------------- density estimation -------------------- */ void denspt( /* inputs */ int *nxy, /* number of (x,y) points */ double *x, double *y, /* (x,y) coordinates */ double *rmaxi, /* maximum distance at which points contribute */ double *sig, /* Gaussian sd */ int *squared, /* whether to use the squared kernel */ /* output */ double *result /* vector of computed density values */ ) { STD_DECLARATIONS; double resulti, coef; double sigma, a; STD_INITIALISE; if(n == 0) return; sigma = *sig; a = 1.0/(2.0 * sigma * sigma); coef = 1.0/(TWOPI * sigma * sigma); if(*squared) { coef *= coef; a *= 2.0; } PAIRLOOP( { resulti = 0.0; }, { resulti += exp(-d2 * a); } , { result[i] = coef * resulti; }) } void wtdenspt( /* inputs */ int *nxy, /* number of (x,y) points */ double *x, double *y, /* (x,y) coordinates */ double *rmaxi, /* maximum distance */ double *sig, /* Gaussian sd */ double *weight, /* vector of weights */ int *squared, /* whether to use the squared kernel */ /* output */ double *result /* vector of weighted density values */ ) { STD_DECLARATIONS; double resulti, coef; double sigma, a; STD_INITIALISE; if(n == 0) return; sigma = *sig; a = 1.0/(2.0 * sigma * sigma); coef = 1.0/(TWOPI * sigma * sigma); if(*squared) { coef *= coef; a *= 2.0; } PAIRLOOP( { resulti = 0.0; }, { resulti += weight[j] * exp(-d2 * a); }, { result[i] = coef * resulti; } ) } /* ------------- anisotropic versions -------------------- */ void adenspt( /* inputs */ int *nxy, /* number of (x,y) points */ double *x, double *y, /* (x,y) coordinates */ double *rmaxi, /* maximum distance at which points contribute */ double *detsigma, /* determinant of variance matrix */ double *sinv, /* inverse variance matrix (2x2, flattened) */ int *squared, /* whether to use the squared kernel */ /* output */ double *result /* vector of density values */ ) { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; if(n == 0) return; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); if(*squared) { coef *= coef; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; } else { s11 = sinv[0]/2.0; s12 = sinv[1]/2.0; s21 = sinv[2]/2.0; s22 = sinv[3]/2.0; } PAIRLOOP( { resulti = 0.0; }, { resulti += exp(-dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22)); }, { result[i] = coef * resulti; }) } void awtdenspt( /* inputs */ int *nxy, /* number of (x,y) points */ double *x, double *y, /* (x,y) coordinates */ double *rmaxi, /* maximum distance at which points contribute */ double *detsigma, /* determinant of variance matrix */ double *sinv, /* inverse variance matrix (2x2, flattened) */ double *weight, /* vector of weights */ int 
*squared, /* whether to use the squared kernel */ /* output */ double *result /* vector of weighted density values */ ) { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); if(n == 0) return; if(*squared) { coef *= coef; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; } else { s11 = sinv[0]/2.0; s12 = sinv[1]/2.0; s21 = sinv[2]/2.0; s22 = sinv[3]/2.0; } PAIRLOOP( { resulti = 0.0; }, { resulti += weight[j] * \ exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))); }, { result[i] = coef * resulti; }) } /* --------------- smoothing --------------------------- */ void smoopt( /* inputs */ int *nxy, /* number of (x,y) points */ double *x, double *y, /* (x,y) coordinates */ double *v, /* vector of mark values to be smoothed */ int *self, /* 0 if leave-one-out */ double *rmaxi, /* maximum distance at which points contribute */ double *sig, /* Gaussian sd */ /* output */ double *result /* vector of computed smoothed values */ ) { STD_DECLARATIONS; int countself; double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; countself = *self; twosig2 = 2.0 * sigma * sigma; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += 1; \ numer += v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } void wtsmoopt( /* inputs */ int *nxy, /* number of (x,y) points */ double *x, double *y, /* (x,y) coordinates */ double *v, /* vector of mark values to be smoothed */ int *self, /* 0 if leave-one-out */ double *rmaxi, /* maximum distance */ double *sig, /* Gaussian sd */ double *weight, /* vector of weights */ /* output */ double *result /* vector of computed smoothed values */ ) { STD_DECLARATIONS; int countself; double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; countself = *self; twosig2 = 2.0 * sigma * sigma; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += weight[i]; \ numer += weight[i] * v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } /* ------------- anisotropic versions -------------------- */ void asmoopt( /* inputs */ int *nxy, /* number of (x,y) points */ double *x, double *y, /* (x,y) coordinates */ double *v, /* vector of mark values to be smoothed */ int *self, /* 0 if leave-one-out */ double *rmaxi, /* maximum distance at which points contribute */ double *sinv, /* inverse variance matrix (2x2, flattened) */ /* output */ double *result /* vector of smoothed values */ ) { STD_DECLARATIONS; int countself; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; countself = *self; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += 1; \ numer += v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * 
s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } void awtsmoopt( /* inputs */ int *nxy, /* number of (x,y) points */ double *x, double *y, /* (x,y) coordinates */ double *v, /* vector of mark values to be smoothed */ int *self, /* 0 if leave-one-out */ double *rmaxi, /* maximum distance at which points contribute */ double *sinv, /* inverse variance matrix (2x2, flattened) */ double *weight, /* vector of weights */ /* output */ double *result /* vector of smoothed values */ ) { STD_DECLARATIONS; int countself; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; countself = *self; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += weight[i]; \ numer += weight[i] * v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } /* ----------------- transformed coordinates -------------------- */ /* The following functions assume that x, y have been transformed by the inverse of the variance matrix, and subsequently scaled by 1/sqrt(2) so that the Gaussian density is proportional to exp(-(x^2+y^2)). Constant factor in density is omitted. */ void Gdenspt( /* inputs */ int *nxy, /* number of (x,y) points */ double *x, double *y, /* (x,y) coordinates */ double *rmaxi, /* maximum distance at which points contribute */ /* output */ double *result /* vector of computed density values */ ) { STD_DECLARATIONS; double resulti; STD_INITIALISE; if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += exp(-d2); } , { result[i] = resulti; }) } void Gwtdenspt( /* inputs */ int *nxy, /* number of (x,y) points */ double *x, double *y, /* (x,y) coordinates */ double *rmaxi, /* maximum distance */ double *weight, /* vector of weights */ /* output */ double *result /* vector of weighted density values */ ) { STD_DECLARATIONS; double resulti; STD_INITIALISE; if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += weight[j] * exp(-d2); }, { result[i] = resulti; } ) } void Gsmoopt( /* inputs */ int *nxy, /* number of (x,y) points */ double *x, double *y, /* (x,y) coordinates */ double *v, /* vector of mark values to be smoothed */ int *self, /* 0 if leave-one-out */ double *rmaxi, /* maximum distance at which points contribute */ /* output */ double *result /* vector of computed smoothed values */ ) { STD_DECLARATIONS; int countself; double numer, denom, wij; STD_INITIALISE; countself = *self; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += 1; \ numer += v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } void Gwtsmoopt( /* inputs */ int *nxy, /* number of (x,y) points */ double *x, double *y, /* (x,y) coordinates */ double *v, /* vector of mark values to be smoothed */ int *self, /* 0 if leave-one-out */ double *rmaxi, /* maximum distance */ double *weight, /* vector of weights */ /* output */ double *result /* vector of computed smoothed values */ ) { STD_DECLARATIONS; int countself; double 
numer, denom, wij; STD_INITIALISE; countself = *self; if(n == 0) return; if(countself != 0) { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ denom += weight[i]; \ numer += weight[i] * v[i]; \ result[i] = numer/denom; \ }) } else { PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ result[i] = numer/denom; \ }) } } spatstat.explore/src/Krect.c0000755000176200001440000000373714611073311015607 0ustar liggesusers/* Krect.c $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ +++ Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2014 ++++ Fast code for K function in rectangular case. **Assumes point pattern is sorted in increasing order of x coordinate** **Assumes window is (0,wide) x (0, high) ** **Assumes output vectors were initialised to zero** Krect.c defines three interface functions, for weighted, unweighted double, and unweighted integer cases KrectFunDec.h (#included thrice) Function declaration, arguments, storage allocation KrectV1.h split according to whether Isotropic Correction is wanted Macro ISOTROPIC is #defined KrectV2.h split according to whether Translation Correction is wanted Macro TRANSLATION is #defined KrectV3.h split according to whether Border Correction is wanted Macro BORDER is #defined KrectV4.h split according to whether Uncorrected estimate is wanted Macro UNCORRECTED is #defined KrectBody.h Function body, including loops over i and j KrectIncrem.h (#included twice) Code performed when a close pair of points has been found: calculate edge corrections, increment results. */ #include #include #include /* This constant is defined in Rmath.h */ #define TWOPI M_2PI #define ABS(X) (((X) >= 0) ? (X) : (-X)) #define SMALL(X) ((ABS(X) < 1.0e-12) ? 1 : 0) #define MIN(X,Y) (((X) < (Y)) ? 
(X) : (Y)) #undef FNAME #undef WEIGHTED #undef COUNTTYPE #define FNAME KrectInt #define COUNTTYPE int #include "KrectFunDec.h" #undef FNAME #undef WEIGHTED #undef COUNTTYPE #define FNAME KrectDbl #define COUNTTYPE double #include "KrectFunDec.h" #undef FNAME #undef WEIGHTED #undef COUNTTYPE #define FNAME KrectWtd #define COUNTTYPE double #define WEIGHTED #include "KrectFunDec.h" spatstat.explore/src/loccumx.h0000755000176200001440000000401014611073311016177 0ustar liggesusers/* loccumx.h C template for loccum.c grid-to-data or data-cross-data functions $Revision: 1.7 $ $Date: 2022/10/21 10:43:01 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 macros: FNAME function name NULVAL initial value (empty sum = 0, empty product = 1) INC(A,B) increment operation A += B or A *= B */ void FNAME( /* inputs */ int *ntest, double *xtest, double *ytest, int *ndata, double *xdata, double *ydata, double *vdata, int *nr, double *rmax, /* output */ double *ans /* matrix of column vectors of functions for each point of first pattern */ ) { int Ntest, Ndata, Nr, Nans; double Rmax; int i, j, k, jleft, kmin, maxchunk, columnstart; double Rmax2, rstep, xtesti, ytesti, xleft; double dx, dy, dx2, d2, d, contrib; Ntest = *ntest; Ndata = *ndata; Nr = *nr; Rmax = *rmax; if(Ntest == 0) return; Nans = Nr * Ntest; /* initialise products to 1 */ OUTERCHUNKLOOP(k, Nans, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(k, Nans, maxchunk, 8196) { ans[k] = NULVAL; } } if(Ndata == 0) return; rstep = Rmax/(Nr-1); Rmax2 = Rmax * Rmax; jleft = 0; OUTERCHUNKLOOP(i, Ntest, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ntest, maxchunk, 8196) { xtesti = xtest[i]; ytesti = ytest[i]; columnstart = Nr * i; /* start position for f_i(.) in 'ans' */ /* adjust starting point */ xleft = xtesti - Rmax; while((xdata[jleft] < xleft) && (jleft+1 < Ndata)) ++jleft; /* process from jleft until |dx| > Rmax */ for(j=jleft; j < Ndata; j++) { dx = xdata[j] - xtesti; dx2 = dx * dx; if(dx2 > Rmax2) break; dy = ydata[j] - ytesti; d2 = dx2 + dy * dy; if(d2 <= Rmax2) { d = sqrt(d2); kmin = (int) ceil(d/rstep); if(kmin < Nr) { contrib = vdata[j]; for(k = kmin; k < Nr; k++) INC(ans[columnstart + k] , contrib); } } } } } } spatstat.explore/src/k3.c0000755000176200001440000000734414611073311015052 0ustar liggesusers#include #include #include "geom3.h" #include "functable.h" /* $Revision: 1.3 $ $Date: 2022/10/22 10:09:51 $ K function of 3D point pattern k3trans translation correction k3isot isotropic correction # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009, 2022. # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. 
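#
# Summary of the computation below: both estimators tabulate the
# 3D K function on an equally spaced grid of distances.  For each
# pair of points an edge-correction weight is added to k->num[l] for
# every l whose distance value is at least the pairwise distance;
# k3trans uses the translation weight 2/(vx*vy*vz), where vx, vy, vz
# are the box side lengths shrunk by the displacement, while k3isot
# combines the reciprocals of sphesfrac() at the two endpoints with a
# volume term built from sphevol().  The ratio num/denom is stored
# in k->f.
#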
# ////////////////////////////////////////////// */ void k3trans( Point *p, int n, Box *b, Ftable *k ) { register int i, j, l, lmin; register double dx, dy, dz, dist; register double vx, vy, vz; Point *ip, *jp; double dt, vol, lambda, denom, term; double sphesfrac(Point *point, Box *box, double r); double sphevol(Point *point, Box *box, double r); /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); lambda = ((double) n )/ vol; denom = lambda * lambda; for(l = 0; l < k->n; l++) { (k->denom)[l] = denom; (k->num)[l] = 0.0; } /* spacing of argument in result vector k */ dt = (k->t1 - k->t0)/(k->n - 1); /* compute numerator */ for( i = 0; i < n; i++) { ip = p + i; for(j = i + 1; j < n; j++) { jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( (dist - k->t0) / dt ); if(lmin < 0) lmin = 0; vx = b->x1 - b->x0 - (dx > 0 ? dx : -dx); vy = b->y1 - b->y0 - (dy > 0 ? dy : -dy); vz = b->z1 - b->z0 - (dz > 0 ? dz : -dz); if(vx >= 0.0 && vy >= 0.0 && vz >= 0.0) { term = 2.0 /(vx * vy * vz); /* 2 because they're ordered pairs */ for(l = lmin; l < k->n; l++) (k->num)[l] += term; } } } /* compute ratio */ for(l = 0; l < k->n; l++) (k->f)[l] = ((k->denom)[l] > 0.0)? (k->num)[l] / (k->denom)[l] : 0.0; } void k3isot( Point *p, int n, Box *b, Ftable *k ) { register int i, j, l, lmin; register double dx, dy, dz, dist; Point *ip, *jp; double dt, vol, denom, term; Point vertex; Box half; double sphesfrac(Point *point, Box *box, double r); double sphevol(Point *point, Box *box, double r); /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); denom = ((double) (n * n))/vol; for(l = 0; l < k->n; l++) { (k->denom)[l] = denom; (k->num)[l] = 0.0; } /* spacing of argument in result vector k */ dt = (k->t1 - k->t0)/(k->n - 1); /* set up for volume correction */ vertex.x = b->x0; vertex.y = b->y0; vertex.z = b->z0; half.x1 = b->x1; half.y1 = b->y1; half.z1 = b->z1; half.x0 = (b->x0 + b->x1)/2.0; half.y0 = (b->y0 + b->y1)/2.0; half.z0 = (b->z0 + b->z1)/2.0; /* compute numerator */ for( i = 0; i < n; i++) { ip = p + i; for(j = i + 1; j < n; j++) { jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( (dist - k->t0) / dt ); if(lmin < 0) lmin = 0; term = (1.0 / sphesfrac(ip, b, dist)) + (1.0 / sphesfrac(jp, b, dist)); term *= 1.0 - 8.0 * sphevol(&vertex, &half, dist) / vol; for(l = lmin; l < k->n; l++) (k->num)[l] += term; } } /* compute ratio */ for(l = 0; l < k->n; l++) (k->f)[l] = ((k->denom)[l] > 0.0)? (k->num)[l] / (k->denom)[l] : 0.0; } spatstat.explore/src/call3d.c0000755000176200001440000002460314611073311015674 0ustar liggesusers/* $Revision: 1.8 $ $Date: 2022/11/02 10:18:16 $ R interface Pass data between R and internally-defined data structures # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009, 2022 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. 
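#
# Layout of this file: each internal type (Ftable, Itable, H4table,
# Box, Point array) has an alloc*() routine based on R_alloc, so the
# memory is transient and reclaimed by R automatically (which is why
# the matching free*() routines do nothing).  The Make*() functions
# build and zero-initialise the result tables, RtoPointarray() and
# RtoBox() convert the R arguments to the internal structures, and
# the *tabletoR() functions copy results back into R vectors that are
# assumed to be pre-allocated to the correct length.  The Rcall*()
# functions at the end are the entry points invoked from R.
#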
# ////////////////////////////////////////////// */ #include #include "geom3.h" #include "functable.h" #undef DEBUG #ifdef DEBUG #define DEBUGMESSAGE(S) Rprintf(S); #else #define DEBUGMESSAGE(S) #endif void g3one(Point *p, int n, Box *b, Ftable *g); void g3three(Point *p, int n, Box *b, Ftable *g); void g3cen(Point *p, int n, Box *b, H4table *count); void k3trans(Point *p, int n, Box *b, Ftable *k); void k3isot(Point *p, int n, Box *b, Ftable *k); void pcf3trans(Point *p, int n, Box *b, Ftable *pcf, double delta); void pcf3isot(Point *p, int n, Box *b, Ftable *pcf, double delta); void phatminus(Point *p, int n, Box *b, double vside, Itable *count); void phatnaive(Point *p, int n, Box *b, double vside, Itable *count); void p3hat4(Point *p, int n, Box *b, double vside, H4table *count); /* ALLOCATION OF SPACE FOR STRUCTURES/ARRAYS We have defined an alloc() and free() function for each type. However, the free() functions currently do nothing, because we use R_alloc to allocate transient space, which is freed automatically by R. */ Ftable * allocFtable(int n) /* allocate function table of size n */ { Ftable *x; x = (Ftable *) R_alloc(1, sizeof(Ftable)); x->n = n; x->f = (double *) R_alloc(n, sizeof(double)); x->num = (double *) R_alloc(n, sizeof(double)); x->denom = (double *) R_alloc(n, sizeof(double)); return(x); } void freeFtable(Ftable *x) { } Itable * allocItable(int n) { Itable *x; x = (Itable *) R_alloc(1, sizeof(Itable)); x->n = n; x->num = (int *) R_alloc(n, sizeof(int)); x->denom = (int *) R_alloc(n, sizeof(int)); return(x); } void freeItable(Itable *x) { } H4table * allocH4table(int n) { H4table *x; x = (H4table *) R_alloc(1, sizeof(H4table)); x->n = n; x->obs = (int *) R_alloc(n, sizeof(int)); x->nco = (int *) R_alloc(n, sizeof(int)); x->cen = (int *) R_alloc(n, sizeof(int)); x->ncc = (int *) R_alloc(n, sizeof(int)); return(x); } void freeH4table(H4table *x) { } Box * allocBox(void) /* I know this is ridiculous but it's consistent. 
*/ { Box *b; b = (Box *) R_alloc(1, sizeof(Box)); return(b); } void freeBox(Box *x) { } Point * allocParray(int n) /* allocate array of n Points */ { Point *p; p = (Point *) R_alloc(n, sizeof(Point)); return(p); } void freeParray(Point *x) { } /* CREATE AND INITIALISE DATA STORAGE */ Ftable * MakeFtable(double *t0, double *t1, int *n) { Ftable *tab; int i, nn; nn = *n; tab = allocFtable(nn); tab->t0 = *t0; tab->t1 = *t1; for(i = 0; i < nn; i++) { tab->f[i] = 0.0; tab->num[i] = 0; tab->denom[i] = 0; } return(tab); } Itable * MakeItable(double *t0, double *t1, int *n) { Itable *tab; int i, nn; nn = *n; tab = allocItable(nn); tab->t0 = *t0; tab->t1 = *t1; for(i = 0; i < nn; i++) { tab->num[i] = 0; tab->denom[i] = 0; } return(tab); } H4table * MakeH4table(double *t0, double *t1, int *n) { H4table *tab; int i, nn; nn = *n; tab = allocH4table(nn); tab->t0 = *t0; tab->t1 = *t1; for(i = 0; i < nn; i++) { tab->obs[i] = 0; tab->nco[i] = 0; tab->cen[i] = 0; tab->ncc[i] = 0; } tab->upperobs = 0; tab->uppercen = 0; return(tab); } /* CONVERSION OF DATA TYPES R -> internal including allocation of internal data types as needed */ Point * RtoPointarray(double *x, double *y, double *z, int *n) { int i, nn; Point *p; nn = *n; p = allocParray(nn); for(i = 0; i < nn; i++) { p[i].x = x[i]; p[i].y = y[i]; p[i].z = z[i]; } return(p); } Box * RtoBox(double *x0, double *x1, double *y0, double *y1, double *z0, double *z1) { Box *b; b = allocBox(); b->x0 = *x0; b->x1 = *x1; b->y0 = *y0; b->y1 = *y1; b->z0 = *z0; b->z1 = *z1; return(b); } /* CONVERSION OF DATA TYPES internal -> R Note: it can generally be assumed that the R arguments are already allocated vectors of correct length, so we do not allocate them. */ void FtabletoR( /* internal */ Ftable *tab, /* R representation */ double *t0, double *t1, int *n, double *f, double *num, double *denom ) { int i; *t0 = tab->t0; *t1 = tab->t1; *n = tab->n; for(i = 0; i < tab->n; i++) { f[i] = tab->f[i]; num[i] = tab->num[i]; denom[i] = tab->denom[i]; } freeFtable(tab); } void ItabletoR( /* internal */ Itable *tab, /* R representation */ double *t0, double *t1, int *m, int *num, int *denom ) { int i; *t0 = tab->t0; *t1 = tab->t1; *m = tab->n; for(i = 0; i < tab->n; i++) { num[i] = tab->num[i]; denom[i] = tab->denom[i]; } freeItable(tab); } void H4tabletoR( /* internal */ H4table *tab, /* R representation */ double *t0, double *t1, int *m, int *obs, int *nco, int *cen, int *ncc, int *upperobs, int *uppercen ) { int i; *t0 = tab->t0; *t1 = tab->t1; *m = tab->n; *upperobs = tab->upperobs; *uppercen = tab->uppercen; for(i = 0; i < tab->n; i++) { obs[i] = tab->obs[i]; nco[i] = tab->nco[i]; cen[i] = tab->cen[i]; ncc[i] = tab->ncc[i]; } freeH4table(tab); } /* R CALLING INTERFACE These routines are called from R by > .C("routine-name", ....) 
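   As a rough sketch only (the real argument marshalling is done by the
   package's own R wrappers, and the native routines are registered
   under names carrying the "SE_" prefix declared in the NAMESPACE),
   a call to RcallK3 below, for hypothetical coordinate vectors x, y, z
   of length n, a unit box, and nr equally spaced distances up to rmax,
   would look roughly like

     res <- .C(SE_RcallK3,
               x = as.double(x), y = as.double(y), z = as.double(z),
               n = as.integer(n),
               x0 = as.double(0), x1 = as.double(1),
               y0 = as.double(0), y1 = as.double(1),
               z0 = as.double(0), z1 = as.double(1),
               t0 = as.double(0), t1 = as.double(rmax),
               m = as.integer(nr),
               f = double(nr), num = double(nr), denom = double(nr),
               method = as.integer(0))

   Every argument, including the outputs f, num and denom, must be
   pre-allocated to the correct length, as noted above for the
   *tabletoR() converters.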
*/ void RcallK3( /* points */ double *x, double *y, double *z, int *n, /* box */ double *x0, double *x1, double *y0, double *y1, double *z0, double *z1, /* Ftable */ double *t0, double *t1, int *m, double *f, double *num, double *denom, /* edge correction */ int *method ) { Point *p; Box *b; Ftable *tab; p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); tab = MakeFtable(t0, t1, m); switch((int) *method) { case 0: k3trans(p, (int) *n, b, tab); break; case 1: k3isot(p, (int) *n, b, tab); break; default: Rprintf("Method %d not implemented: defaults to 0\n", *method); k3trans(p, (int) *n, b, tab); break; } FtabletoR(tab, t0, t1, m, f, num, denom); } void RcallG3( /* points */ double *x, double *y, double *z, int *n, /* box */ double *x0, double *x1, double *y0, double *y1, double *z0, double *z1, /* Ftable */ double *t0, double *t1, /* Ftable */ int *m, double *f, double *num, double *denom, /* edge correction */ int *method ) { Point *p; Box *b; Ftable *tab; p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); tab = MakeFtable(t0, t1, m); switch(*method) { case 1: g3one(p, (int) *n, b, tab); break; case 3: g3three(p, (int) *n, b, tab); break; default: Rprintf("Method %d not implemented: defaults to 3\n", *method); g3three(p, (int) *n, b, tab); } FtabletoR(tab, t0, t1, m, f, num, denom); } void RcallG3cen( /* points */ double *x, double *y, double *z, int *n, /* box */ double *x0, double *x1, double *y0, double *y1, double *z0, double *z1, /* H4table */ double *t0, double *t1, int *m, int *obs, int *nco, int *cen, int *ncc, int *upperobs, int *uppercen ) { Point *p; Box *b; H4table *count; DEBUGMESSAGE("Inside RcallG3cen\n") p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); count = MakeH4table(t0, t1, m); g3cen(p, (int) *n, b, count); H4tabletoR(count, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen); DEBUGMESSAGE("Leaving RcallG3cen\n") } void RcallF3( /* points */ double *x, double *y, double *z, int *n, /* box */ double *x0, double *x1, double *y0, double *y1, double *z0, double *z1, /* voxel size */ double *vside, /* Itable */ double *t0, double *t1, int *m, int *num, int *denom, /* edge correction */ int *method ) { Point *p; Box *b; Itable *count; DEBUGMESSAGE("Inside Rcall_f3\n") p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); count = MakeItable(t0, t1, m); switch((int) *method) { case 0: phatnaive(p, (int) *n, b, *vside, count); break; case 1: phatminus(p, (int) *n, b, *vside, count); break; default: Rprintf("Method %d not recognised: defaults to 1\n", *method); phatminus(p, (int) *n, b, *vside, count); } ItabletoR(count, t0, t1, m, num, denom); DEBUGMESSAGE("Leaving Rcall_f3\n") } void RcallF3cen( /* points */ double *x, double *y, double *z, int *n, /* box */ double *x0, double *x1, double *y0, double *y1, double *z0, double *z1, /* voxel size */ double *vside, /* H4table */ double *t0, double *t1, int *m, int *obs, int *nco, int *cen, int *ncc, int *upperobs, int *uppercen ) { Point *p; Box *b; H4table *count; DEBUGMESSAGE("Inside Rcallf3cen\n") p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); count = MakeH4table(t0, t1, m); p3hat4(p, (int) *n, b, *vside, count); H4tabletoR(count, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen); DEBUGMESSAGE("Leaving Rcallf3cen\n") } void Rcallpcf3( /* points */ double *x, double *y, double *z, int *n, /* box */ double *x0, double *x1, double *y0, double *y1, double *z0, double *z1, /* Ftable */ double *t0, double *t1, int *m, double *f, double *num, double 
*denom, /* edge correction */ int *method, /* Epanechnikov kernel halfwidth */ double *delta ) { Point *p; Box *b; Ftable *tab; p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); tab = MakeFtable(t0, t1, m); switch((int) *method) { case 0: pcf3trans(p, (int) *n, b, tab, (double) *delta); break; case 1: pcf3isot(p, (int) *n, b, tab, (double) *delta); break; default: Rprintf("Method %d not implemented: defaults to 0\n", *method); pcf3trans(p, (int) *n, b, tab, (double) *delta); break; } FtabletoR(tab, t0, t1, m, f, num, denom); } spatstat.explore/NAMESPACE0000644000176200001440000004356014737444216015035 0ustar liggesusers## spatstat.explore NAMESPACE file ## ................ Import packages .................. import(stats,graphics,grDevices,utils,methods) import(spatstat.utils,spatstat.data, spatstat.univar,spatstat.sparse, spatstat.geom,spatstat.random) import(goftest) import(Matrix) importFrom(abind,abind) importFrom(nlme, collapse) ## ................ Load dynamic library .............. ## (native routines are registered in init.c) ## (entry points are symbols with prefix "SE_") useDynLib(spatstat.explore, .registration=TRUE, .fixes="SE_") ## ////////// DO NOT EDIT THE FOLLOWING /////////////// ## //////// it is generated automatically ///////////// # .................................................. # Automatically-generated list of documented objects # .................................................. export("$<-.fv") export("adaptive.density") export("adjust.ratfv") export("allstats") export("alltypes") export("ang2rad") export("apply.ssf") export("as.data.frame.bw.optim") export("as.data.frame.envelope") export("as.data.frame.fv") export("as.function.fv") export("as.function.rhohat") export("as.function.ssf") export("as.fv") export("as.fv.bw.optim") export("as.fv.data.frame") export("as.fv.fasp") export("as.fv.fv") export("as.fv.matrix") export("as.im.scan.test") export("as.im.ssf") export("as.owin.quadrattest") export("as.ppp.ssf") export("assemble.plot.objects") export("as.tess.quadrattest") export("auc") export("auc.ppp") export("bandwidth.is.infinite") export("BartCalc") export("berman.test") export("bermantestCalc") export("bermantestEngine") export("berman.test.ppp") export("bind.fv") export("bind.ratfv") export("bits.envelope") export("bits.test") export("blur") export("blurHeat") export("blurHeat.im") export("boyce") export("bw.abram.ppp") export("bw.CvL") export("bw.CvL.adaptive") export("bw.CvLHeat") export("bw.diggle") export("bw.frac") export("bw.optim") export("bw.pcf") export("bw.ppl") export("bw.pplHeat") export("bw.relrisk") export("bw.relriskHeatppp") export("bw.relrisk.ppp") export("bw.scott") export("bw.scott.iso") export("bw.smoothppp") export("bw.stoyan") export("calc.DR") export("calc.NNIR") export("calc.SAVE") export("calc.SIR") export("calc.TSE") export("cbind.fv") export("cdf.test") export("cdf.test.ppp") export("censtimeCDFest") export("check.testfun") export("circdensity") export("circticks") export("clarkevans") export("clarkevansCalc") export("clarkevans.test") export("clusterset") export("collapse.anylist") export("collapse.fv") export("compatible.fasp") export("compatible.fv") export("compatible.rat") export("compileCDF") export("compileK") export("compilepcf") export("Complex.fasp") export("Complex.fv") export("conform.ratfv") export("contour.ssf") export("cor.im") export("cov.im") export("CressieReadName") export("CressieReadStatistic") export("CressieReadSymbol") export("cutoff2Dkernel") export("CVforPCF") export("dclf.progress") 
export("dclf.sigtrace") export("dclf.test") export("densityAdaptiveKernel.ppp") export("densityAdaptiveKernel.ppplist") export("densityAdaptiveKernel.splitppp") export("densitycrossEngine") export("densityfun") export("densityfun.ppp") export("densityHeat") export("densityHeat.ppp") export("densitypointsEngine") export("density.ppp") export("density.ppplist") export("density.psp") export("density.splitppp") export("densityVoronoi") export("densityVoronoi.ppp") export("deriv.fv") export("Deviation") export("dg.envelope") export("dg.progress") export("dg.sigtrace") export("dg.test") export("digestCovariates") export("digital.volume") export("dim.fasp") export("dimhat") export("dimnames.fasp") export("dimnames<-.fasp") export("distcdf") export("distributecbind") export("domain.quadrattest") export("edge.Ripley") export("edge.Trans") export("Emark") export("ensure.listarg") export("envelope") export("envelopeArray") export("envelopeEngine") export("envelope.envelope") export("envelope.hasenvelope") export("envelope.matrix") export("envelope.pp3") export("envelope.ppp") export("envelopeProgressData") export("envelopeTest") export("evalCovar") export("eval.fasp") export("eval.fv") export("evaluate2Dkernel") export("evaluateCovariate") export("evaluateCovariateAtPixels") export("evaluateCovariateAtPoints") export("exactppm") export("ExpSmoothLog") export("extractAtomicQtests") export("f3Cengine") export("f3engine") export("F3est") export("[.fasp") export("fasp") export("Fest") export("Fhazard") export("findbestlegendpos") export("findcbind") export("Finhom") export("flatfname") export("Fmulti.inhom") export("FmultiInhom") export("FormatFaspFormulae") export("formula<-") export("formula.fv") export("formula<-.fv") export("fryplot") export("frypoints") export("[.fv") export("[<-.fv") export("fv") export("fvexprmap") export("fvlabelmap") export("fvlabels") export("fvlabels<-") export("fvlegend") export("fvnames") export("fvnames<-") export("g3Cengine") export("g3engine") export("G3est") export("Gcross") export("Gcross.inhom") export("Gdot") export("Gdot.inhom") export("Gest") export("getSumFun") export("Gfox") export("Ginhom") export("Gmulti") export("Gmulti.inhom") export("GmultiInhom") export("good.correction.K") export("harmonise.fv") export("harmonize.fv") export("hasenvelope") export("Hest") export("hopskel") export("hopskel.test") export("hotbox") export("idw") export("Iest") export("image.ssf") export("implemented.for.K") export("increment.fv") export("integral.fv") export("integral.ssf") export("is.atomicQtest") export("is.poisson.exactppm") export("is.scov") export("is.stationary.exactppm") export("Jcross") export("Jcross.inhom") export("Jdot") export("Jdot.inhom") export("Jest") export("Jfox") export("Jinhom") export("Jmulti") export("Jmulti.inhom") export("k3engine") export("K3est") export("Kborder.engine") export("Kcross") export("Kcross.inhom") export("Kdot") export("Kdot.inhom") export("Kest") export("Kest.fft") export("Kinhom") export("Kmark") export("Kmeasure") export("Kmulti") export("Kmulti.inhom") export("Knone.engine") export("Kount") export("Krect.engine") export("Kscaled") export("Ksector") export("Kwtsum") export("laslett") export("Lcross") export("Lcross.inhom") export("Ldot") export("Ldot.inhom") export("Lest") export("Linhom") export("localK") export("localKcross") export("localKcross.inhom") export("localKdot") export("localKengine") export("localKinhom") export("localKmultiEngine") export("localL") export("localLcross") export("localLcross.inhom") export("localLdot") 
export("localLinhom") export("localpcf") export("localpcfengine") export("localpcfinhom") export("[.localpcfmatrix") export("localpcfmatrix") export("lohboot") export("lookup2DkernelInfo") export("Lscaled") export("mad.progress") export("mad.sigtrace") export("mad.test") export("makefvlabel") export("markconnect") export("markcorr") export("markcorrint") export("markcrosscorr") export("markmarkscatter") export("markmean") export("marks.ssf") export("marks<-.ssf") export("marktable") export("markvar") export("markvario") export("maskLaslett") export("match2DkernelName") export("Math.fasp") export("Math.fv") export("max.ssf") export("mctest.progress") export("mctest.sigtrace") export("mctestSigtraceEngine") export("meanlistfv") export("min.ssf") export("miplot") export("names<-.fv") export("nearest.neighbour") export("nnclean") export("nncleanEngine") export("nnclean.pp3") export("nnclean.ppp") export("nncorr") export("nndensity") export("nndensity.ppp") export("nnmean") export("nnorient") export("nnvario") export("Ops.fasp") export("Ops.fv") export("pairMean") export("pairorient") export("pairs.im") export("pairs.listof") export("pairs.solist") export("panel.contour") export("panel.histogram") export("panel.image") export("pcf") export("pcf3engine") export("pcf3est") export("pcfcross") export("pcfcross.inhom") export("pcfdot") export("pcfdot.inhom") export("pcf.fasp") export("pcf.fv") export("pcfinhom") export("pcfmulti") export("pcfmulti.inhom") export("pcf.ppp") export("plot.bermantest") export("plot.bw.frac") export("plot.bw.optim") export("plot.cdftest") export("plot.envelope") export("plot.fasp") export("plot.fv") export("plot.laslett") export("plot.localpcfmatrix") export("plot.plotpairsim") export("plot.quadrattest") export("plot.rho2hat") export("plot.rhohat") export("plot.scan.test") export("plot.spatialcdf") export("plot.ssf") export("plot.studpermutest") export("polyLaslett") export("pool") export("pool.anylist") export("pool.envelope") export("pool.fasp") export("pool.fv") export("pool.quadrattest") export("pool.rat") export("PPversion") export("predict.exactppm") export("predict.rho2hat") export("predict.rhohat") export("prefixfv") export("print.bw.frac") export("print.bw.optim") export("print.densityfun") export("print.envelope") export("print.exactppm") export("print.fasp") export("print.fv") export("print.fvfun") export("print.hasenvelope") export("print.laslett") export("print.localpcfmatrix") export("print.plotpairsim") export("print.quadrattest") export("print.rat") export("print.rho2hat") export("print.rhohat") export("print.Smoothfun") export("print.ssf") export("print.summary.bw.optim") export("print.summary.ssf") export("QQversion") export("quadrat.test") export("quadrat.testEngine") export("quadrat.test.ppp") export("quadrat.test.quadratcount") export("quadrat.test.splitppp") export("radcumint") export("range.ssf") export("[.rat") export("rat") export("ratfv") export("rebadge.as.crossfun") export("rebadge.as.dotfun") export("rebadge.fv") export("rebadgeLabels") export("reconcile.fv") export("rectcontact") export("RelevantDeviation") export("reload.or.compute") export("relrisk") export("relriskHeat") export("relriskHeat.ppp") export("relrisk.ppp") export("rename.fv") export("resolve.2D.kernel") export("resolveEinfo") export("resolve.foxall.window") export("resolve.lambda") export("resolve.lambdacross") export("resolve.lambdacross.ppp") export("resolve.lambda.ppp") export("resolve.reciplambda") export("resolve.reciplambda.ppp") export("rho2hat") export("rhohat") 
export("rhohatCalc") export("rhohatEngine") export("rhohat.ppp") export("rhohat.quad") export("rmax.Rigid") export("rmax.Ripley") export("rmax.rule") export("rmax.Trans") export("roc") export("rocData") export("rocModel") export("roc.ppp") export("rose") export("roseContinuous") export("rose.default") export("rose.density") export("rose.fv") export("rose.histogram") export("rotmean") export("scanBinomLRTS") export("scanLRTS") export("scanmeasure") export("scanmeasure.im") export("scanmeasure.ppp") export("scanPoisLRTS") export("scan.test") export("sdr") export("sdr.ppp") export("sdrPredict") export("second.moment.calc") export("second.moment.engine") export("segregation.test") export("segregation.test.ppp") export("sewpcf") export("sewsmod") export("sharpen") export("sharpen.ppp") export("shift.quadrattest") export("simulate.rhohat") export("simulrecipe") export("Smooth") export("smoothcrossEngine") export("Smoothfun") export("Smoothfun.ppp") export("Smooth.fv") export("SmoothHeat") export("SmoothHeat.im") export("SmoothHeat.ppp") export("Smooth.im") export("smoothpointsEngine") export("Smooth.ppp") export("Smooth.solist") export("Smooth.ssf") export("spatcov") export("spatialcdf") export("spatialCDFframe") export("spatialCDFtest") export("spatialCDFtestCalc") export("spatialCovariateEvidence") export("spatialCovariateEvidence.exactppm") export("spatialCovariateEvidence.ppp") export("SpatialMedian") export("SpatialMedian.ppp") export("SpatialQuantile") export("SpatialQuantile.ppp") export("sphere.volume") export("[.ssf") export("ssf") export("StieltjesCalc.fv") export("stienen") export("stienenSet") export("studpermu.test") export("subspaceDistance") export("summary.bw.optim") export("summary.envelope") export("Summary.fasp") export("Summary.fv") export("summary.ssf") export("thresholdCI") export("thresholdSelect") export("transect.im") export("Tstat") export("tweak.fv.entry") export("tweak.ratfv.entry") export("twostage.envelope") export("twostage.test") export("unmark.ssf") export("updateData") export("updateData.default") export("validate2Dkernel") export("validate.angles") export("validate.weights") export("vanilla.fv") export("varblock") export("Vmark") export("weightedclosepairs") export("which.max.im") export("Window.quadrattest") export("with.fv") export("with.ssf") export("X2testEngine") # ....... Special cases ........... S3method("Complex", "fasp") S3method("Complex", "fv") S3method("Math", "fasp") S3method("Math", "fv") S3method("Ops", "fasp") S3method("Ops", "fv") S3method("Summary", "fasp") S3method("Summary", "fv") # ....... End of special cases ... # ......................................... # Automatically generated list of S3 methods # ......................................... 
S3method("as.data.frame", "bw.optim") S3method("as.data.frame", "envelope") S3method("as.data.frame", "fv") S3method("as.function", "fv") S3method("as.function", "rhohat") S3method("as.function", "ssf") S3method("as.fv", "bw.optim") S3method("as.fv", "data.frame") S3method("as.fv", "fasp") S3method("as.fv", "fv") S3method("as.fv", "matrix") S3method("as.im", "scan.test") S3method("as.im", "ssf") S3method("as.owin", "quadrattest") S3method("as.ppp", "ssf") S3method("as.tess", "quadrattest") S3method("auc", "ppp") S3method("berman.test", "ppp") S3method("blurHeat", "im") S3method("bw.abram", "ppp") S3method("bw.relrisk", "ppp") S3method("cbind", "fv") S3method("cdf.test", "ppp") S3method("collapse", "anylist") S3method("collapse", "fv") S3method("compatible", "fasp") S3method("compatible", "fv") S3method("compatible", "rat") S3method("contour", "ssf") S3method("densityAdaptiveKernel", "ppp") S3method("densityAdaptiveKernel", "ppplist") S3method("densityAdaptiveKernel", "splitppp") S3method("densityfun", "ppp") S3method("densityHeat", "ppp") S3method("density", "ppp") S3method("density", "ppplist") S3method("density", "psp") S3method("density", "splitppp") S3method("densityVoronoi", "ppp") S3method("deriv", "fv") S3method("dim", "fasp") S3method("dimnames", "fasp") S3method("domain", "quadrattest") S3method("envelope", "envelope") S3method("envelope", "hasenvelope") S3method("envelope", "matrix") S3method("envelope", "pp3") S3method("envelope", "ppp") S3method("[", "fasp") S3method("formula", "fv") S3method("[", "fv") S3method("harmonise", "fv") S3method("harmonize", "fv") S3method("image", "ssf") S3method("integral", "fv") S3method("integral", "ssf") S3method("is.poisson", "exactppm") S3method("is.stationary", "exactppm") S3method("[", "localpcfmatrix") S3method("marks", "ssf") S3method("max", "ssf") S3method("min", "ssf") S3method("nnclean", "pp3") S3method("nnclean", "ppp") S3method("nndensity", "ppp") S3method("pairs", "im") S3method("pairs", "listof") S3method("pairs", "solist") S3method("pcf", "fasp") S3method("pcf", "fv") S3method("pcf", "ppp") S3method("plot", "bermantest") S3method("plot", "bw.frac") S3method("plot", "bw.optim") S3method("plot", "cdftest") S3method("plot", "envelope") S3method("plot", "fasp") S3method("plot", "fv") S3method("plot", "laslett") S3method("plot", "localpcfmatrix") S3method("plot", "plotpairsim") S3method("plot", "quadrattest") S3method("plot", "rho2hat") S3method("plot", "rhohat") S3method("plot", "scan.test") S3method("plot", "spatialcdf") S3method("plot", "ssf") S3method("plot", "studpermutest") S3method("pool", "anylist") S3method("pool", "envelope") S3method("pool", "fasp") S3method("pool", "fv") S3method("pool", "quadrattest") S3method("pool", "rat") S3method("predict", "exactppm") S3method("predict", "rho2hat") S3method("predict", "rhohat") S3method("print", "bw.frac") S3method("print", "bw.optim") S3method("print", "densityfun") S3method("print", "envelope") S3method("print", "exactppm") S3method("print", "fasp") S3method("print", "fv") S3method("print", "fvfun") S3method("print", "hasenvelope") S3method("print", "laslett") S3method("print", "localpcfmatrix") S3method("print", "plotpairsim") S3method("print", "quadrattest") S3method("print", "rat") S3method("print", "rho2hat") S3method("print", "rhohat") S3method("print", "Smoothfun") S3method("print", "ssf") S3method("print", "summary.bw.optim") S3method("print", "summary.ssf") S3method("quadrat.test", "ppp") S3method("quadrat.test", "quadratcount") S3method("quadrat.test", "splitppp") 
S3method("range", "ssf") S3method("[", "rat") S3method("relriskHeat", "ppp") S3method("relrisk", "ppp") S3method("resolve.lambdacross", "ppp") S3method("resolve.lambda", "ppp") S3method("resolve.reciplambda", "ppp") S3method("rhohat", "ppp") S3method("rhohat", "quad") S3method("roc", "ppp") S3method("rose", "default") S3method("rose", "density") S3method("rose", "fv") S3method("rose", "histogram") S3method("scanmeasure", "im") S3method("scanmeasure", "ppp") S3method("sdr", "ppp") S3method("segregation.test", "ppp") S3method("sharpen", "ppp") S3method("shift", "quadrattest") S3method("simulate", "rhohat") S3method("Smoothfun", "ppp") S3method("Smooth", "fv") S3method("SmoothHeat", "im") S3method("SmoothHeat", "ppp") S3method("Smooth", "im") S3method("Smooth", "ppp") S3method("Smooth", "solist") S3method("Smooth", "ssf") S3method("spatialCovariateEvidence", "exactppm") S3method("spatialCovariateEvidence", "ppp") S3method("SpatialMedian", "ppp") S3method("SpatialQuantile", "ppp") S3method("[", "ssf") S3method("StieltjesCalc", "fv") S3method("summary", "bw.optim") S3method("summary", "envelope") S3method("summary", "ssf") S3method("unmark", "ssf") S3method("updateData", "default") S3method("Window", "quadrattest") S3method("with", "fv") S3method("with", "ssf") # ......................................... # Assignment methods # ......................................... S3method("$<-", "fv") S3method("dimnames<-", "fasp") S3method("formula<-", "fv") S3method("[<-", "fv") S3method("marks<-", "ssf") S3method("names<-", "fv") # ......................................... # End of methods # ......................................... spatstat.explore/inst/0000755000176200001440000000000014634222661014555 5ustar liggesusersspatstat.explore/inst/CITATION0000755000176200001440000000357114611073330015713 0ustar liggesusersc( bibentry(bibtype = "Book", title = "Spatial Point Patterns: Methodology and Applications with {R}", author = c(person("Adrian", "Baddeley"), person("Ege", "Rubak"), person("Rolf", "Turner")), year = "2015", publisher = "Chapman and Hall/CRC Press", address = "London", isbn = 9781482210200, url = "https://www.routledge.com/Spatial-Point-Patterns-Methodology-and-Applications-with-R/Baddeley-Rubak-Turner/p/book/9781482210200/", header = "To cite spatstat in publications, please use:" ), bibentry(bibtype = "Article", title = "Hybrids of Gibbs Point Process Models and Their Implementation", author = c(person("Adrian", "Baddeley"), person("Rolf", "Turner"), person("Jorge", "Mateu"), person("Andrew", "Bevan")), journal = "Journal of Statistical Software", year = "2013", volume = "55", number = "11", pages = "1--43", doi = "10.18637/jss.v055.i11", header = "If you use hybrid models, please also cite:" ), bibentry(bibtype = "Article", title = "{spatstat}: An {R} Package for Analyzing Spatial Point Patterns", author = c(person("Adrian", "Baddeley"), person("Rolf", "Turner")), journal = "Journal of Statistical Software", year = "2005", volume = "12", number = "6", pages = "1--42", doi = "10.18637/jss.v012.i06", header = "In survey articles, please also cite the original paper on spatstat:" ) ) spatstat.explore/inst/info/0000755000176200001440000000000014634222661015510 5ustar liggesusersspatstat.explore/inst/info/packagesizes.txt0000755000176200001440000000120714737444216020733 0ustar liggesusersdate version nhelpfiles nobjects ndatasets Rlines srclines "2022-05-25" "3.0-0" 224 500 0 30319 6183 "2022-11-08" "3.0-4" 226 507 0 30638 6295 "2023-01-26" "3.0-6" 226 507 0 30644 6295 "2023-03-14" "3.1-0" 
227 508 0 30724 6295 "2023-05-10" "3.2-0" 234 523 0 31541 6365 "2023-05-13" "3.2-1" 234 523 0 31561 6365 "2023-09-07" "3.2-3" 235 523 0 31638 6365 "2023-10-22" "3.2-5" 236 524 0 31769 6365 "2024-01-31" "3.2-6" 241 535 0 32147 6365 "2024-03-21" "3.2-7" 241 535 0 32149 6365 "2024-07-09" "3.3-1" 230 517 0 31571 6365 "2024-08-20" "3.3-2" 230 517 0 31571 6364 "2024-10-22" "3.3-3" 235 525 0 31903 6364 "2025-01-08" "3.3-4" 235 525 0 31907 6364 spatstat.explore/inst/doc/0000755000176200001440000000000014645353000015314 5ustar liggesusersspatstat.explore/inst/doc/packagesizes.txt0000755000176200001440000000120714737444216020545 0ustar liggesusersdate version nhelpfiles nobjects ndatasets Rlines srclines "2022-05-25" "3.0-0" 224 500 0 30319 6183 "2022-11-08" "3.0-4" 226 507 0 30638 6295 "2023-01-26" "3.0-6" 226 507 0 30644 6295 "2023-03-14" "3.1-0" 227 508 0 30724 6295 "2023-05-10" "3.2-0" 234 523 0 31541 6365 "2023-05-13" "3.2-1" 234 523 0 31561 6365 "2023-09-07" "3.2-3" 235 523 0 31638 6365 "2023-10-22" "3.2-5" 236 524 0 31769 6365 "2024-01-31" "3.2-6" 241 535 0 32147 6365 "2024-03-21" "3.2-7" 241 535 0 32149 6365 "2024-07-09" "3.3-1" 230 517 0 31571 6365 "2024-08-20" "3.3-2" 230 517 0 31571 6364 "2024-10-22" "3.3-3" 235 525 0 31903 6364 "2025-01-08" "3.3-4" 235 525 0 31907 6364 spatstat.explore/man/0000755000176200001440000000000014700374645014357 5ustar liggesusersspatstat.explore/man/plot.bermantest.Rd0000644000176200001440000000567114611073324017767 0ustar liggesusers\name{plot.bermantest} \alias{plot.bermantest} \title{Plot Result of Berman Test} \description{ Plot the result of Berman's test of goodness-of-fit } \usage{ \method{plot}{bermantest}(x, ..., lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2) } \arguments{ \item{x}{ Object to be plotted. An object of class \code{"bermantest"} produced by \code{\link{berman.test}}. } \item{\dots}{ extra arguments that will be passed to the plotting function \code{\link{plot.ecdf}}. } \item{col,lwd,lty}{ The width, colour and type of lines used to plot the empirical distribution curve. } \item{col0,lwd0,lty0}{ The width, colour and type of lines used to plot the predicted (null) distribution curve. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for the class \code{"bermantest"}. An object of this class represents the outcome of Berman's test of goodness-of-fit of a spatial Poisson point process model, computed by \code{\link{berman.test}}. For the \emph{Z1} test (i.e. if \code{x} was computed using \code{berman.test( ,which="Z1")}), the plot displays the two cumulative distribution functions that are compared by the test: namely the empirical cumulative distribution function of the covariate at the data points, \eqn{\hat F}{Fhat}, and the predicted cumulative distribution function of the covariate under the model, \eqn{F_0}{F0}, both plotted against the value of the covariate. Two vertical lines show the mean values of these two distributions. If the model is correct, the two curves should be close; the test is based on comparing the two vertical lines. For the \emph{Z2} test (i.e. if \code{x} was computed using \code{berman.test( ,which="Z2")}), the plot displays the empirical cumulative distribution function of the values \eqn{U_i = F_0(Y_i)}{U[i] = F0(Y[i])} where \eqn{Y_i}{Y[i]} is the value of the covariate at the \eqn{i}-th data point. The diagonal line with equation \eqn{y=x} is also shown. Two vertical lines show the mean of the values \eqn{U_i}{U[i]} and the value \eqn{1/2}. 
If the model is correct, the two curves should be close. The test is based on comparing the two vertical lines. } \seealso{ \code{\link{berman.test}} } \examples{ plot(berman.test(cells, "x")) if(require("spatstat.model")) { # synthetic data: nonuniform Poisson process X <- rpoispp(function(x,y) { 100 * exp(-x) }, win=square(1)) # fit uniform Poisson process fit0 <- ppm(X ~1) # test covariate = x coordinate xcoord <- function(x,y) { x } # test wrong model k <- berman.test(fit0, xcoord, "Z1") # plot result of test plot(k, col="red", col0="green") # Z2 test k2 <- berman.test(fit0, xcoord, "Z2") plot(k2, col="red", col0="green") } } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} \concept{Goodness-of-fit} spatstat.explore/man/Lcross.Rd0000644000176200001440000000560614611073323016110 0ustar liggesusers\name{Lcross} \alias{Lcross} \title{Multitype L-function (cross-type)} \description{ Calculates an estimate of the cross-type L-function for a multitype point pattern. } \usage{ Lcross(X, i, j, ..., from, to, correction) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross-type \eqn{L} function \eqn{L_{ij}(r)}{Lij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{correction,\dots}{ Arguments passed to \code{\link{Kcross}}. } \item{from,to}{ An alternative way to specify \code{i} and \code{j} respectively. } } \details{ The cross-type L-function is a transformation of the cross-type K-function, \deqn{L_{ij}(r) = \sqrt{\frac{K_{ij}(r)}{\pi}}}{Lij(r) = sqrt(Kij(r)/pi)} where \eqn{K_{ij}(r)}{Kij(r)} is the cross-type K-function from type \code{i} to type \code{j}. See \code{\link{Kcross}} for information about the cross-type K-function. The command \code{Lcross} first calls \code{\link{Kcross}} to compute the estimate of the cross-type K-function, and then applies the square root transformation. For a marked point pattern in which the points of type \code{i} are independent of the points of type \code{j}, the theoretical value of the L-function is \eqn{L_{ij}(r) = r}{Lij(r) = r}. The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L_{ij}}{Lij} is more appropriate for use in simulation envelopes and hypothesis tests. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L_{ij}}{Lij} has been estimated } \item{theo}{the theoretical value \eqn{L_{ij}(r) = r}{Lij(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{ij}}{Lij} obtained by the edge corrections named. 
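% Added sketch: the square-root relationship described in the Details can be
% checked numerically. A minimal sketch, assuming the package is attached and
% using the 'amacrine' dataset (as in the Examples below); the 'iso' column is
% the isotropic-correction estimate.
%   K <- Kcross(amacrine, "off", "on")
%   L <- Lcross(amacrine, "off", "on")
%   max(abs(L$iso - sqrt(K$iso / pi)))   # should be essentially zero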
} \seealso{ \code{\link{Kcross}}, \code{\link{Ldot}}, \code{\link{Lest}} } \examples{ L <- Lcross(amacrine, "off", "on") plot(L) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/radcumint.Rd0000644000176200001440000000364314643125462016637 0ustar liggesusers\name{radcumint} \alias{radcumint} \title{ Radial Cumulative Integral } \description{ Compute the cumulative integral of an image over increasing radial distances from the origin. } \usage{ radcumint(X, \dots, origin, Xname, result = c("fv", "im")) } \arguments{ \item{X}{ A pixel image (object of class \code{"im"}) with numerical or logical values. } \item{\dots}{ Ignored. } \item{origin}{ Optional. Origin about which the rotations should be performed. Either a numeric vector or a character string as described in the help for \code{\link[spatstat.geom]{shift.owin}}. } \item{Xname}{ Optional name for \code{X} to be used in the function labels. } \item{result}{ Character string specifying the kind of result required: either a function object or a pixel image. } } \details{ This command computes, for each possible distance \eqn{r}, the integral of the pixel values lying inside the disc of radius \eqn{r} centred at the origin. If \code{result="fv"} (the default) the result is a function object \code{f} of class \code{"fv"}. For each value of radius \eqn{r}, the function value \code{f(r)} is the integral of \code{X} over the disc of radius \eqn{r}. If \code{result="im"} the result is a pixel image, with the same dimensions as \code{X}. At a given pixel, the result is equal to \code{f(r)} where \code{r} is the distance from the given pixel to the origin. That is, at any given pixel, the resulting value is the integral of \code{X} over the disc centred at the origin whose boundary passes through the given pixel. } \value{ An object of class \code{"fv"} or \code{"im"}, with the same coordinate units as \code{X}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{rotmean}}, \code{\link{spatialcdf}} } \examples{ D <- density(redwood) plot(radcumint(D)) plot(radcumint(D, result="im")) } \keyword{spatial} \keyword{math} spatstat.explore/man/dg.envelope.Rd0000644000176200001440000001045214611073323017044 0ustar liggesusers\name{dg.envelope} \alias{dg.envelope} \title{ Global Envelopes for Dao-Genton Test } \description{ Computes the global envelopes corresponding to the Dao-Genton test of goodness-of-fit. } \usage{ dg.envelope(X, \dots, nsim = 19, nsimsub=nsim-1, nrank = 1, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose = TRUE) } \arguments{ \item{X}{ Either a point pattern dataset (object of class \code{"ppp"}, \code{"lpp"} or \code{"pp3"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"slrm"}). } \item{\dots}{ Arguments passed to \code{\link{mad.test}} or \code{\link{envelope}} to control the conduct of the test. Useful arguments include \code{fun} to determine the summary function, \code{rinterval} to determine the range of \eqn{r} values used in the test, and \code{verbose=FALSE} to turn off the messages. } \item{nsim}{ Number of simulated patterns to be generated in the primary experiment. } \item{nsimsub}{ Number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations. } \item{nrank}{ Integer. 
Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{alternative="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{alternative="less"}) or a one-sided test with an upper critical boundary (\code{alternative="greater"}). } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value determining whether to print progress reports. } } \details{ Computes global simulation envelopes corresponding to the Dao-Genton (2014) adjusted Monte Carlo goodness-of-fit test. The envelopes were developed in Baddeley et al (2015) and described in Baddeley, Rubak and Turner (2015). If \code{X} is a point pattern, the null hypothesis is CSR. If \code{X} is a fitted model, the null hypothesis is that model. The Dao-Genton test is biased when the significance level is very small (small \eqn{p}-values are not reliable) and we recommend \code{\link{bits.envelope}} in this case. } \value{ An object of class \code{"fv"}. } \references{ Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Unpublished manuscript. \baddrubaturnbook } \author{ Adrian Baddeley, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \spatstatAuthors. } \seealso{ \code{\link{dg.test}}, \code{\link{mad.test}}, \code{\link{envelope}} } \examples{ ns <- if(interactive()) 19 else 4 E <- dg.envelope(swedishpines, Lest, nsim=ns) E plot(E) Eo <- dg.envelope(swedishpines, Lest, alternative="less", nsim=ns) Ei <- dg.envelope(swedishpines, Lest, interpolate=TRUE, nsim=ns) } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} \concept{Goodness-of-fit} \concept{Test of randomness} \concept{Envelope of simulations} spatstat.explore/man/laslett.Rd0000644000176200001440000001373314643125461016321 0ustar liggesusers\name{laslett} \alias{laslett} \title{ Laslett's Transform } \description{ Apply Laslett's Transform to a spatial region, returning the original and transformed regions, and the original and transformed positions of the lower tangent points. This is a diagnostic for the Boolean model. } \usage{ laslett(X, \dots, verbose = FALSE, plotit = TRUE, discretise = FALSE, type=c("lower", "upper", "left", "right")) } \arguments{ \item{X}{ Spatial region to be transformed. A window (object of class \code{"owin"}) or a logical-valued pixel image (object of class \code{"im"}). 
} \item{\dots}{ Graphics arguments to control the plot (passed to \code{\link{plot.laslett}} when \code{plotit=TRUE}) or arguments determining the pixel resolution (passed to \code{\link[spatstat.geom]{as.mask}}). } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{plotit}{ Logical value indicating whether to plot the result. } \item{discretise}{ Logical value indicating whether polygonal windows should first be converted to pixel masks before the Laslett transform is computed. This should be set to \code{TRUE} for very complicated polygons. } \item{type}{ Type of tangent points to be detected. This also determines the direction of contraction in the set transformation. Default is \code{type="lower"}. } } \details{ This function finds the lower tangent points of the spatial region \code{X}, then applies Laslett's Transform to the space, and records the transformed positions of the lower tangent points. Laslett's transform is a diagnostic for the Boolean Model. A test of the Boolean model can be performed by applying a test of CSR to the transformed tangent points. See the Examples. The rationale is that, if the region \code{X} was generated by a Boolean model with convex grains, then the lower tangent points of \code{X}, when subjected to Laslett's transform, become a Poisson point process (Cressie, 1993, section 9.3.5; Molchanov, 1997; Barbour and Schmidt, 2001). Intuitively, Laslett's transform is a way to account for the fact that tangent points of \code{X} cannot occur \emph{inside} \code{X}. It treats the interior of \code{X} as empty space, and collapses this empty space so that only the \emph{exterior} of \code{X} remains. In this collapsed space, the tangent points are completely random. Formally, Laslett's transform is a random (i.e. data-dependent) spatial transformation which maps each spatial location \eqn{(x,y)} to a new location \eqn{(x',y)} at the same height \eqn{y}. The transformation is defined so that \eqn{x'} is the total \emph{uncovered} length of the line segment from \eqn{(0,y)} to \eqn{(x,y)}, that is, the total length of the parts of this segment that fall outside the region \code{X}. In more colourful terms, suppose we use an abacus to display a pixellated version of \code{X}. Each wire of the abacus represents one horizontal line in the pixel image. Each pixel lying \emph{outside} the region \code{X} is represented by a bead of the abacus; pixels \emph{inside} \code{X} are represented by the absence of a bead. Next we find any beads which are lower tangent points of \code{X}, and paint them green. Then Laslett's Transform is applied by pushing all beads to the left, as far as possible. The final locations of all the beads provide a new spatial region, inside which is the point pattern of tangent points (marked by the green-painted beads). If \code{plotit=TRUE} (the default), a before-and-after plot is generated, showing the region \code{X} and the tangent points before and after the transformation. This plot can also be generated by calling \code{plot(a)} where \code{a} is the object returned by the function \code{laslett}. If the argument \code{type} is given, then this determines the type of tangents that will be detected, and also the direction of contraction in Laslett's transform. The computation is performed by first rotating \code{X}, applying Laslett's transform for lower tangent points, then rotating back. There are separate algorithms for polygonal windows and pixellated windows (binary masks). 
The polygonal algorithm may be slow for very complicated polygons. If this happens, setting \code{discretise=TRUE} will convert the polygonal window to a binary mask and invoke the pixel raster algorithm. } \value{ A list, which also belongs to the class \code{"laslett"} so that it can immediately be printed and plotted. The list elements are: \describe{ \item{oldX:}{the original dataset \code{X};} \item{TanOld:}{a point pattern, whose window is \code{Frame(X)}, containing the lower tangent points of \code{X};} \item{TanNew:}{a point pattern, whose window is the Laslett transform of \code{Frame(X)}, and which contains the Laslett-transformed positions of the tangent points;} \item{Rect:}{a rectangular window, which is the largest rectangle lying inside the transformed set;} \item{df:}{a data frame giving the locations of the tangent points before and after transformation. } \item{type:}{character string specifying the type of tangents.} } } \references{ Barbour, A.D. and Schmidt, V. (2001) On Laslett's Transform for the Boolean Model. \emph{Advances in Applied Probability} \bold{33}(1), 1--5. Cressie, N.A.C. (1993) \emph{Statistics for spatial data}, second edition. John Wiley and Sons. Molchanov, I. (1997) \emph{Statistics of the Boolean Model for Practitioners and Mathematicians}. Wiley. } \author{ Kassel Hingee and \adrian. } \seealso{ \code{\link{plot.laslett}} } \examples{ a <- laslett(heather$coarse) transformedHeather <- with(a, Window(TanNew)) plot(transformedHeather, invert=TRUE) with(a, clarkevans.test(TanNew[Rect], correction="D", nsim=39)) X <- discs(runifrect(15) \%mark\% 0.2, npoly=16) b <- laslett(X, type="left") b } \keyword{spatial} \keyword{manip} spatstat.explore/man/Gfox.Rd0000644000176200001440000001010114643125461015536 0ustar liggesusers\name{Gfox} \alias{Gfox} \alias{Jfox} \title{ Foxall's Distance Functions } \description{ Given a point pattern \code{X} and a spatial object \code{Y}, compute estimates of Foxall's \eqn{G} and \eqn{J} functions. } \usage{ Gfox(X, Y, r=NULL, breaks=NULL, correction=c("km", "rs", "han"), W, \dots) Jfox(X, Y, r=NULL, breaks=NULL, correction=c("km", "rs", "han"), W, \dots, warn.trim=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) from which distances will be measured. } \item{Y}{ An object of class \code{"ppp"}, \code{"psp"} or \code{"owin"} to which distances will be measured. Alternatively a pixel image (class \code{"im"}) with logical values. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which \eqn{Gfox(r)} or \eqn{Jfox(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{Gfox(r)} or \eqn{Jfox(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"cs"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{W}{ Optional. A window (object of class \code{"owin"}) to be taken as the window of observation. The distribution function will be estimated from data inside \code{W}. The default is \code{W=Frame(Y)} when \code{Y} is a window, and \code{W=Window(Y)} otherwise. } \item{\dots}{ Extra arguments affecting the discretisation of distances. 
These arguments are ignored by \code{Gfox}, but \code{Jfox} passes them to \code{\link{Hest}} to determine the discretisation of the spatial domain. } \item{warn.trim}{ Logical value indicating whether a warning should be issued by \code{Jfox} when the window of \code{X} had to be trimmed in order to be a subset of the frame of \code{Y}. } } \details{ Given a point pattern \code{X} and another spatial object \code{Y}, these functions compute two nonparametric measures of association between \code{X} and \code{Y}, introduced by Foxall (Foxall and Baddeley, 2002). Let the random variable \eqn{R} be the distance from a typical point of \code{X} to the object \code{Y}. Foxall's \eqn{G}-function is the cumulative distribution function of \eqn{R}: \deqn{G(r) = P(R \le r)}{P(R <= r)} Let the random variable \eqn{S} be the distance from a \emph{fixed} point in space to the object \code{Y}. The cumulative distribution function of \eqn{S} is the (unconditional) spherical contact distribution function \deqn{H(r) = P(S \le r)}{H(r) = P(S <= r)} which is computed by \code{\link{Hest}}. Foxall's \eqn{J}-function is the ratio \deqn{ J(r) = \frac{1-G(r)}{1-H(r)} }{ J(r) = (1-G(r))/(1-H(r)) } For further interpretation, see Foxall and Baddeley (2002). Accuracy of \code{Jfox} depends on the pixel resolution, which is controlled by the arguments \code{eps}, \code{dimyx} and \code{xy} passed to \code{\link[spatstat.geom]{as.mask}}. For example, use \code{eps=0.1} to specify square pixels of side 0.1 units, and \code{dimyx=256} to specify a 256 by 256 grid of pixels. } \value{ A function value table (object of class \code{"fv"}) which can be printed, plotted, or converted to a data frame of values. } \references{ Foxall, R. and Baddeley, A. (2002) Nonparametric measures of association between a spatial point process and a random set, with geological applications. \emph{Applied Statistics} \bold{51}, 165--182. } \seealso{ \code{\link{Gest}}, \code{\link{Hest}}, \code{\link{Jest}}, \code{\link{Fest}} } \examples{ X <- copper$SouthPoints Y <- copper$SouthLines G <- Gfox(X,Y) J <- Jfox(X,Y, correction="km") \testonly{ J <- Jfox(X,Y, correction="km", eps=1) } } \author{Rob Foxall and \adrian } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/density.psp.Rd0000644000176200001440000000711014643125461017121 0ustar liggesusers\name{density.psp} \alias{density.psp} \title{Kernel Smoothing of Line Segment Pattern} \description{ Compute a kernel smoothed intensity function from a line segment pattern. } \usage{ \method{density}{psp}(x, sigma, \dots, weights=NULL, edge=TRUE, method=c("FFT", "C", "interpreted"), at=NULL) } \arguments{ \item{x}{ Line segment pattern (object of class \code{"psp"}) to be smoothed. } \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel. } \item{\dots}{ Extra arguments, including arguments passed to \code{\link[spatstat.geom]{as.mask}} to determine the resolution of the resulting image. } \item{weights}{ Optional. Numerical weights for each line segment. A numeric vector, of length equal to the number of segments in \code{x}. } \item{edge}{ Logical flag indicating whether to apply edge correction. } \item{method}{ Character string (partially matched) specifying the method of computation. Option \code{"FFT"} is the fastest, while \code{"C"} is the most accurate. } \item{at}{ Optional. An object specifying the locations where density values should be computed. 
Either a window (object of class \code{"owin"}) or a point pattern (object of class \code{"ppp"} or \code{"lpp"}). } } \value{ A pixel image (object of class \code{"im"}) or a numeric vector. } \details{ This is the method for the generic function \code{\link{density}} for the class \code{"psp"} (line segment patterns). A kernel estimate of the intensity of the line segment pattern is computed. The result is the convolution of the isotropic Gaussian kernel, of standard deviation \code{sigma}, with the line segments. The result is computed as follows: \itemize{ \item if \code{method="FFT"} (the default), the line segments are discretised using \code{\link[spatstat.geom]{pixellate.psp}}, then the Fast Fourier Transform is used to calculate the convolution. This method is the fastest, but is slightly less accurate. Accuracy can be improved by increasing pixel resolution. \item if \code{method="C"} the exact value of the convolution at the centre of each pixel is computed analytically using \code{C} code; \item if \code{method="interpreted"}, the exact value of the convolution at the centre of each pixel is computed analytically using \code{R} code. This method is the slowest. } If \code{edge=TRUE} this result is adjusted for edge effects by dividing it by the convolution of the same Gaussian kernel with the observation window. If \code{weights} are given, then the contribution from line segment \code{i} is multiplied by the value of \code{weights[i]}. If the argument \code{at} is given, then it specifies the locations where density values should be computed. \itemize{ \item If \code{at} is a window, then the window is converted to a binary mask using the arguments \code{\dots}, and density values are computed at the centre of each pixel in this mask. The result is a pixel image. \item If \code{at} is a point pattern, then density values are computed at each point location, and the result is a numeric vector. } } \seealso{ \code{\link[spatstat.geom]{psp.object}}, \code{\link[spatstat.geom]{im.object}}, \code{\link{density}} } \examples{ L <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) D <- density(L, sigma=0.03) plot(D, main="density(L)") plot(L, add=TRUE) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.explore/man/collapse.fv.Rd0000644000176200001440000000651314611073323017055 0ustar liggesusers\name{collapse.fv} \alias{collapse.fv} \alias{collapse.anylist} \title{ Collapse Several Function Tables into One } \description{ Combines several function tables (objects of class \code{"fv"}) into a single function table, merging columns that are identical and relabelling columns that are different. } \usage{ \method{collapse}{fv}(object, \dots, same = NULL, different = NULL) \method{collapse}{anylist}(object, \dots, same = NULL, different = NULL) } \arguments{ \item{object}{ An object of class \code{"fv"}, or a list of such objects. } \item{\dots}{ Additional objects of class \code{"fv"}. } \item{same}{ Character string or character vector specifying a column or columns of function values that are identical in different \code{"fv"} objects. These columns will be included only once in the result. } \item{different}{ Character string or character vector specifying a column or columns of function values, that are different in different \code{"fv"} objects. Each of these columns of data will be included, with labels that distinguish them from each other. } } \details{ This is a method for the generic function \code{\link[nlme]{collapse}}. 
It combines the data in several function tables (objects of class \code{"fv"}, see \code{\link{fv.object}}) to make a single function table. It is essentially a smart wrapper for \code{\link{cbind.fv}}. A typical application is to calculate the same summary statistic (such as the \eqn{K} function) for different point patterns, and then to use \code{collapse.fv} to combine the results into a single object that can easily be plotted. See the Examples.
The arguments \code{object} and \code{\dots} should be function tables (objects of class \code{"fv"}, see \code{\link{fv.object}}) that are compatible in the sense that they have the same values of the function argument. (This can be ensured by applying \code{\link{harmonise.fv}} to them.)
The argument \code{same} identifies any columns that are present in some or all of the function tables, and which are known to contain exactly the same values in each table that includes them. This column or columns will be included only once in the result.
The argument \code{different} identifies any columns that are present in some or all of the function tables, and which may contain different numerical values in different tables. Each of these columns will be included, with labels to distinguish them.
Columns that are not named in \code{same} or \code{different} will not be included. The function argument is always included and does not need to be specified.
The arguments \code{same} and \code{different} can be \code{NULL}, or they can be character vectors containing the names of columns of \code{object}. The argument \code{different} can be one of the abbreviations recognised by \code{\link{fvnames}}. } \value{ Object of class \code{"fv"}. } \seealso{ \code{\link{fv.object}}, \code{\link{cbind.fv}} } \examples{ # generate simulated data X <- replicate(3, rpoispp(100), simplify=FALSE) names(X) <- paste("Simulation", 1:3) # compute K function estimates Klist <- anylapply(X, Kest) # collapse K <- collapse(Klist, same="theo", different="iso") K } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat.explore/man/pool.anylist.Rd0000644000176200001440000000235614643125462017304 0ustar liggesusers\name{pool.anylist} \alias{pool.anylist} \title{ Pool Data from a List of Objects } \description{ Pool the data from the objects in a list. } \usage{ \method{pool}{anylist}(x, ...) } \arguments{ \item{x}{ A list, belonging to the class \code{"anylist"}, containing objects that can be pooled. } \item{\dots}{ Optional additional objects which can be pooled with the elements of \code{x}. } } \details{ The function \code{\link{pool}} is generic. Its purpose is to combine data from several objects of the same type (typically computed from different datasets) into a common, pooled estimate. The function \code{pool.anylist} is the method for the class \code{"anylist"}. It is used when the objects to be pooled are given in a list \code{x}. Each of the elements of the list \code{x}, and each of the subsequent arguments \code{\dots} if provided, must be an object of the same class. } \value{ An object of the same class as each of the entries in \code{x}. } \seealso{ \code{\link[spatstat.geom]{anylist}}, \code{\link{pool}}.
} \examples{ Keach <- anylapply(waterstriders, Kest, ratio=TRUE, correction="iso") K <- pool(Keach) } \author{\spatstatAuthors.} \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/pool.fasp.Rd0000644000176200001440000000345414611073324016544 0ustar liggesusers\name{pool.fasp} \alias{pool.fasp} \title{ Pool Data from Several Function Arrays } \description{ Pool the simulation data from several function arrays (objects of class \code{"fasp"}) and compute a new function array. } \usage{ \method{pool}{fasp}(...) } \arguments{ \item{\dots}{ Objects of class \code{"fasp"}. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"fasp"} of function arrays. It is used to combine the simulation data from several arrays of simulation envelopes and to compute a new array of envelopes based on the combined data. Each of the arguments \code{\dots} must be a function array (object of class \code{"fasp"}) containing simulation envelopes. This is typically created by running the command \code{\link{alltypes}} with the arguments \code{envelope=TRUE} and \code{savefuns=TRUE}. This ensures that each object is an array of simulation envelopes, and that each envelope contains the simulated data (summary function values) that were used to construct the envelope. The simulated data are extracted from each object and combined. A new array of envelopes is computed from the combined set of simulations. Warnings or errors will be issued if the objects \code{\dots} appear to be incompatible. However, the code is not smart enough to decide whether it is sensible to pool the data. } \value{ An object of class \code{"fasp"}. } \seealso{ \code{\link{fasp}}, \code{\link{alltypes}}, \code{\link{pool.envelope}}, \code{\link{pool}} } \examples{ A1 <- alltypes(amacrine,"K",nsim=9,envelope=TRUE,savefuns=TRUE) A2 <- alltypes(amacrine,"K",nsim=10,envelope=TRUE,savefuns=TRUE) pool(A1, A2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat.explore/man/compileCDF.Rd0000644000176200001440000000633114611073323016604 0ustar liggesusers\name{compileCDF} \alias{compileCDF} \title{ Generic Calculation of Cumulative Distribution Function of Distances } \description{ A low-level function which calculates the estimated cumulative distribution function of a distance variable. } \usage{ compileCDF(D, B, r, \dots, han.denom=NULL, check=TRUE) } \arguments{ \item{D}{ A vector giving the distances from each data point to the target. } \item{B}{ A vector giving the distances from each data point to the window boundary, or censoring distances. } \item{r}{ An equally spaced, finely spaced sequence of distance values at which the CDF should be estimated. } \item{\dots}{ Ignored. } \item{han.denom}{ Denominator for the Hanisch-Chiu-Stoyan estimator. A single number, or a numeric vector with the same length as \code{r}. } \item{check}{ Logical value specifying whether to check validity of the data, for example, that the vectors \code{D} and \code{B} have the same length, and contain non-negative numbers. } } \details{ This low-level function calculates estimates of the cumulative distribution function \deqn{F(r) = P(D \le r)}{F(r) = P(D <= r)} of a distance variable \eqn{D}, given a vector of observed values of \eqn{D} and other information. Examples of this concept include the empty space distance function computed by \code{\link{Fest}} and the nearest-neighbour distance distribution function \code{\link{Gest}}. 
This function \code{compileCDF} and its siblings \code{\link{compileK}} and \code{\link{compilepcf}} are useful for code development and for teaching, because they perform a common task, and do the housekeeping required to make an object of class \code{"fv"} that represents the estimated function. However, they are not very efficient. The argument \code{D} should be a numeric vector of shortest distances measured from each \sQuote{query} point to the \sQuote{target} set. The argument \code{B} should be a numeric vector of shortest distances measured from each \sQuote{query} point to the boundary of the window of observation. All entries of \code{D} and \code{B} should be non-negative. \code{compileCDF} calculates estimates of the cumulative distribution function \eqn{F(r)} using the border method (reduced sample estimator), the Kaplan-Meier estimator and, if \code{han.denom} is given, the Hanisch-Chiu-Stoyan estimator. See Chapter 8 of Baddeley, Rubak and Turner (2015). The result is an object of class \code{"fv"} representing the estimated function. Additional columns (such as a column giving the theoretical value) must be added by the user, with the aid of \code{\link{bind.fv}}. } \value{ An object of class \code{"fv"} representing the estimated function. } \author{ \adrian } \seealso{ \code{\link{compileK}}. \code{\link{bind.fv}} to add more columns. } \references{ \baddrubaturnbook } \examples{ ## Equivalent to Gest(japanesepines) X <- japanesepines D <- nndist(X) B <- bdist.points(X) r <- seq(0, 0.25, by=0.01) H <- eroded.areas(Window(X), r) G <- compileCDF(D=D, B=B, r=r, han.denom=H) G <- rebadge.fv(G, new.fname="G", new.ylab=quote(G(r))) plot(G) } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/bind.fv.Rd0000644000176200001440000001041314611073323016161 0ustar liggesusers\name{bind.fv} \alias{bind.fv} \alias{cbind.fv} \title{ Combine Function Value Tables } \description{ Advanced Use Only. Combine objects of class \code{"fv"}, or glue extra columns of data onto an existing \code{"fv"} object. } \usage{ \method{cbind}{fv}(...) bind.fv(x, y, labl = NULL, desc = NULL, preferred = NULL, clip=FALSE) } \arguments{ \item{\dots}{ Any number of arguments, which are objects of class \code{"fv"}, or other data. See Details. } \item{x}{ An object of class \code{"fv"}. } \item{y}{ Either an object of class \code{"fv"}, a data frame, or a function. See Details. } \item{labl}{ Plot labels (see \code{\link{fv}}) for columns of \code{y}. A character vector. } \item{desc}{ Descriptions (see \code{\link{fv}}) for columns of \code{y}. A character vector. } \item{preferred}{ Character string specifying the column which is to be the new recommended value of the function. } \item{clip}{ Logical value indicating whether each object must have exactly the same domain, that is, the same sequence of values of the function argument (\code{clip=FALSE}, the default) or whether objects with different domains are permissible and will be restricted to a common domain (\code{clip=TRUE}). } } \details{ This documentation is provided for experienced programmers who want to modify the internal behaviour of \pkg{spatstat}. The function \code{cbind.fv} is a method for the generic \R function \code{\link{cbind}}. It combines any number of objects of class \code{"fv"} into a single object of class \code{"fv"}. The objects must be compatible, in the sense that they have identical values of the function argument. 
The function \code{bind.fv} is a lower level utility which glues additional columns onto an existing object \code{x} of class \code{"fv"}. It has three modes of use: \itemize{ \item If the additional dataset \code{y} is an object of class \code{"fv"}, then \code{x} and \code{y} must be compatible as described above. Then the columns of \code{y} that contain function values will be appended to the object \code{x}. \item Alternatively if \code{y} is a data frame, then \code{y} must have the same number of rows as \code{x}. All columns of \code{y} will be appended to \code{x}. \item Alternatively if \code{y} is a function in the \R language, then this function will be evaluated at the argument values stored in the object \code{x}, and these function values will be appended as a new column to \code{x}. } The arguments \code{labl} and \code{desc} provide plot labels and description strings (as described in \code{\link{fv}}) for the \emph{new} columns. If \code{y} is an object of class \code{"fv"} then \code{labl} and \code{desc} are optional, and default to the relevant entries in the object \code{y}. If \code{y} is a data frame then \code{labl} and \code{desc} should be provided, but there is a default. For additional flexibility, \code{cbind.fv} also accepts arguments which are data frames or functions. } \value{ An object of class \code{"fv"}. } \author{ \spatstatAuthors. } \examples{ K1 <- Kest(cells, correction="border") K2 <- Kest(cells, correction="iso") # remove column 'theo' to avoid duplication K2 <- K2[, names(K2) != "theo"] cbind(K1, K2) bind.fv(K1, K2, preferred="iso") # constrain border estimate to be monotonically increasing bm <- cumsum(c(0, pmax(0, diff(K1$border)))) bind.fv(K1, data.frame(bmono=bm), "\%s[bmo](r)", "monotone border-corrected estimate of \%s", "bmono") # add a column of values defined by a function cbind(K1, upper=function(r) { pi * r^2 + 0.1 }) } \seealso{ \code{\link{fv}} for creating objects of class \code{"fv"} from raw data. \code{\link{collapse.fv}} for combining several \code{"fv"} objects with similar columns. \code{\link{with.fv}} for evaluating expressions. \code{\link{fvnames}} for extracting and assigning the column names of standard components of \code{"fv"} objects. \emph{Undocumented} functions for modifying an \code{"fv"} object include \code{tweak.fv.entry} and \code{rebadge.fv}. } \keyword{spatial} \keyword{attribute} spatstat.explore/man/as.tess.Rd0000644000176200001440000000415014643125461016222 0ustar liggesusers\name{as.tess} \alias{as.tess.quadrattest} \title{Convert Data To Tessellation} \description{ Converts data specifying a tessellation, in any of several formats, into an object of class \code{"tess"}. } \usage{ \method{as.tess}{quadrattest}(X) } \arguments{ \item{X}{Data to be converted to a tessellation.} } \value{ An object of class \code{"tess"} specifying a tessellation. } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. This command creates an object of class \code{"tess"} that represents a tessellation. This function converts data in any of several formats into an object of class \code{"tess"} for use by the \pkg{spatstat} package. The argument \code{X} may be \itemize{ \item an object of class \code{"tess"}. The object will be stripped of any extraneous attributes and returned. \item a pixel image (object of class \code{"im"}) with pixel values that are logical or factor values. 
Each level of the factor will determine a tile of the tessellation. \item a window (object of class \code{"owin"}). The result will be a tessellation consisting of a single tile. \item a set of quadrat counts (object of class \code{"quadratcount"}) returned by the command \code{\link[spatstat.geom]{quadratcount}}. The quadrats used to generate the counts will be extracted and returned as a tessellation. \item a quadrat test (object of class \code{"quadrattest"}) returned by the command \code{\link[spatstat.explore]{quadrat.test}}. The quadrats used to perform the test will be extracted and returned as a tessellation. \item a list of windows (objects of class \code{"owin"}) giving the tiles of the tessellation. } The function \code{as.tess} is generic, with methods for various classes, as listed above. } \seealso{ \code{\link[spatstat.geom]{tess}} } \examples{ h <- quadrat.test(nztrees, nx=4, ny=3) as.tess(h) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} \concept{Tessellation} spatstat.explore/man/thresholdSelect.Rd0000644000176200001440000000506414611073325017777 0ustar liggesusers\name{thresholdSelect} \alias{thresholdSelect} \title{ Select Threshold to Convert Numerical Predictor to Binary Predictor } \description{ Given a point pattern and a spatial covariate that has some predictive value for the point pattern, determine the optimal value of the threshold for converting the covariate to a binary predictor. } \usage{ thresholdSelect(X, Z, method = c("Y", "LL", "AR", "t", "C"), Zname) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{Z}{ Spatial covariate with numerical values. Either a pixel image (object of class \code{"im"}), a distance function (object of class \code{"distfun"}) or a \code{function(x,y)} in the \R language. } \item{method}{ Character string (partially matched) specifying the method to be used to select the optimal threshold value. See Details. } \item{Zname}{ Optional character string giving a short name for the covariate. } } \details{ The spatial covariate \code{Z} is assumed to have some utility as a predictor of the point pattern \code{X}. This code chooses the best threshold value \eqn{v} for converting the numerical predictor \code{Z} to a binary predictor, for use in techniques such as Weights of Evidence. The best threshold is selected by maximising the criterion specified by the argument \code{method}. Options are: \itemize{ \item \code{method="Y"} (the default): the Youden criterion \item \code{method="LL"}: log-likelihood \item \code{method="AR"}: the Akman-Raftery criterion \item \code{method="t"}: the Studentised Weights-of-Evidence contrast \item \code{method="C"}: the Weights-of-Evidence contrast } These criteria are explained in Baddeley et al (2021). } \value{ A single numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} (see \code{\link[spatstat.explore]{bw.optim.object}}) which can be plotted to show the criterion used to select the threshold. } \references{ Baddeley, A., Brown, W., Milne, R.K., Nair, G., Rakshit, S., Lawrence, T., Phatak, A. and Fu, S.C. (2021) Optimal thresholding of predictors in mineral prospectivity analysis. \emph{Natural Resources Research} \bold{30} 923--969. } \author{ \adrian. 
} \seealso{ \code{\link{thresholdCI}} } \examples{ gold <- rescale(murchison$gold, 1000, "km") faults <- rescale(murchison$faults, 1000, "km") distfault <- distfun(faults) z <- thresholdSelect(gold, distfault) z plot(z, xlim=c(0, 20)) } \keyword{spatial} \keyword{models} \keyword{nonparametric} spatstat.explore/man/Kcross.Rd0000644000176200001440000001713514643125461016115 0ustar liggesusers\name{Kcross} \alias{Kcross} \title{ Multitype K Function (Cross-type) } \description{ For a multitype point pattern, estimate the multitype \eqn{K} function which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}. } \usage{ Kcross(X, i, j, r=NULL, breaks=NULL, correction, \dots, ratio=FALSE, from, to ) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"periodic"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{Ignored.} \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{from,to}{ An alternative way to specify \code{i} and \code{j} respectively. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{ij}(r)}{Kij(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{ij}(r)}{Kij(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{ij}(r)}{Kij(r)} obtained by the edge corrections named. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ This function \code{Kcross} and its companions \code{\link{Kdot}} and \code{\link{Kmulti}} are generalisations of the function \code{\link{Kest}} to multitype point patterns. 
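For example (a hedged sketch, not part of the original examples), if \code{i} and \code{j} are omitted they default to the first and second levels of the marks factor, so for the \code{amacrine} data the following calls are equivalent:
\preformatted{
  levels(marks(amacrine))              # "off", "on"
  Ka <- Kcross(amacrine)               # defaults to i="off", j="on"
  Kb <- Kcross(amacrine, "off", "on")  # the same, written explicitly
}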
A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{X$marks}. If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. The ``cross-type'' (type \eqn{i} to type \eqn{j}) \eqn{K} function of a stationary multitype point process \eqn{X} is defined so that \eqn{\lambda_j K_{ij}(r)}{lambda[j] Kij(r)} equals the expected number of additional random points of type \eqn{j} within a distance \eqn{r} of a typical point of type \eqn{i} in the process \eqn{X}. Here \eqn{\lambda_j}{lambda[j]} is the intensity of the type \eqn{j} points, i.e. the expected number of points of type \eqn{j} per unit area. The function \eqn{K_{ij}}{Kij} is determined by the second order moment properties of \eqn{X}. An estimate of \eqn{K_{ij}(r)}{Kij(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the process of type \eqn{i} points were independent of the process of type \eqn{j} points, then \eqn{K_{ij}(r)}{Kij(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{ij}}{Kij} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} may suggest dependence between the points of types \eqn{i} and \eqn{j}. This algorithm estimates the distribution function \eqn{K_{ij}(r)}{Kij(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}, using the border correction. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. The pair correlation function can also be applied to the result of \code{Kcross}; see \code{\link{pcf}}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. 
They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Kdot}}, \code{\link{Kest}}, \code{\link{Kmulti}}, \code{\link{pcf}} } \examples{ # amacrine cells data K01 <- Kcross(amacrine, "off", "on") plot(K01) \testonly{ K01 <- Kcross(amacrine, "off", "on", ratio=TRUE) } # synthetic example: point pattern with marks 0 and 1 \donttest{ pp <- runifpoispp(50) pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE)) K <- Kcross(pp, "0", "1") K <- Kcross(pp, 0, 1) # equivalent } } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/scanLRTS.Rd0000644000176200001440000001171114643125462016275 0ustar liggesusers\name{scanLRTS} \alias{scanLRTS} \title{ Likelihood Ratio Test Statistic for Scan Test } \description{ Calculate the Likelihood Ratio Test Statistic for the Scan Test, at each spatial location. } \usage{ scanLRTS(X, r, \dots, method = c("poisson", "binomial"), baseline = NULL, case = 2, alternative = c("greater", "less", "two.sided"), saveopt = FALSE, Xmask = NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{r}{ Radius of circle to use. A single number or a numeric vector. } \item{\dots}{ Optional. Arguments passed to \code{\link[spatstat.geom]{as.mask}} to determine the spatial resolution of the computations. } \item{method}{ Either \code{"poisson"} or \code{"binomial"} specifying the type of likelihood. } \item{baseline}{ Baseline for the Poisson intensity, if \code{method="poisson"}. A pixel image or a function. } \item{case}{ Which type of point should be interpreted as a case, if \code{method="binomial"}. Integer or character string. } \item{alternative}{ Alternative hypothesis: \code{"greater"} if the alternative postulates that the mean number of points inside the circle will be greater than expected under the null. } \item{saveopt}{ Logical value indicating to save the optimal value of \code{r} at each location. } \item{Xmask}{ Internal use only. } } \details{ This command computes, for all spatial locations \code{u}, the Likelihood Ratio Test Statistic \eqn{\Lambda(u)}{Lambda(u)} for a test of homogeneity at the location \eqn{u}, as described below. The result is a pixel image giving the values of \eqn{\Lambda(u)}{Lambda(u)} at each pixel. The \bold{maximum} value of \eqn{\Lambda(u)}{Lambda(u)} over all locations \eqn{u} is the \emph{scan statistic}, which is the basis of the \emph{scan test} performed by \code{\link{scan.test}}. \itemize{ \item If \code{method="poisson"} then the test statistic is based on Poisson likelihood. The dataset \code{X} is treated as an unmarked point pattern. By default (if \code{baseline} is not specified) the null hypothesis is complete spatial randomness CSR (i.e. a uniform Poisson process). At the spatial location \eqn{u}, the alternative hypothesis is a Poisson process with one intensity \eqn{\beta_1}{beta1} inside the circle of radius \code{r} centred at \eqn{u}, and another intensity \eqn{\beta_0}{beta0} outside the circle. If \code{baseline} is given, then it should be a pixel image or a \code{function(x,y)}. The null hypothesis is an inhomogeneous Poisson process with intensity proportional to \code{baseline}. The alternative hypothesis is an inhomogeneous Poisson process with intensity \code{beta1 * baseline} inside the circle, and \code{beta0 * baseline} outside the circle. 
\item If \code{method="binomial"} then the test statistic is based on binomial likelihood. The dataset \code{X} must be a bivariate point pattern, i.e. a multitype point pattern with two types. The null hypothesis is that all permutations of the type labels are equally likely. The alternative hypothesis is that the circle of radius \code{r} centred at \eqn{u} has a higher proportion of points of the second type, than expected under the null hypothesis. } If \code{r} is a vector of more than one value for the radius, then the calculations described above are performed for every value of \code{r}. Then the maximum over \code{r} is taken for each spatial location \eqn{u}. The resulting pixel value of \code{scanLRTS} at a location \eqn{u} is the profile maximum of the Likelihood Ratio Test Statistic, that is, the maximum of the Likelihood Ratio Test Statistic for circles of all radii, centred at the same location \eqn{u}. If you have already performed a scan test using \code{\link{scan.test}}, the Likelihood Ratio Test Statistic can be extracted from the test result using the function \code{\link{as.im.scan.test}}. } \section{Warning: window size}{ Note that the result of \code{scanLRTS} is a pixel image on a larger window than the original window of \code{X}. The expanded window contains the centre of any circle of radius \code{r} that has nonempty intersection with the original window. } \value{ A pixel image (object of class \code{"im"}) whose pixel values are the values of the (profile) Likelihood Ratio Test Statistic at each spatial location. } \references{ Kulldorff, M. (1997) A spatial scan statistic. \emph{Communications in Statistics --- Theory and Methods} \bold{26}, 1481--1496. } \author{\adrian and \rolf } \seealso{ \code{\link{scan.test}}, \code{\link{as.im.scan.test}} } \examples{ plot(scanLRTS(redwood, 0.1, method="poisson")) sc <- scanLRTS(chorley, 1, method="binomial", case="larynx") plot(sc) scanstatchorley <- max(sc) } \keyword{htest} \keyword{spatial} spatstat.explore/man/Gdot.Rd0000644000176200001440000002134214643125461015541 0ustar liggesusers\name{Gdot} \alias{Gdot} \title{ Multitype Nearest Neighbour Distance Function (i-to-any) } \description{ For a multitype point pattern, estimate the distribution of the distance from a point of type \eqn{i} to the nearest other point of any type. } \usage{ Gdot(X, i, r=NULL, breaks=NULL, \dots, correction=c("km", "rs", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the distance distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. 
} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{G_{i\bullet}(r)}{Gi.(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)} } \item{han}{the Hanisch-style estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G_{i\bullet}(r)}{Gi.(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G_{i\bullet}(r)}{Gi.(r)}, i.e. the empirical distribution of the distances from each point of type \eqn{i} to the nearest other point of any type. } \item{theo}{the theoretical value of \eqn{G_{i\bullet}(r)}{Gi.(r)} for a marked Poisson process with the same estimated intensity (see below). } } \details{ This function \code{Gdot} and its companions \code{\link{Gcross}} and \code{\link{Gmulti}} are generalisations of the function \code{\link{Gest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level.) The ``dot-type'' (type \eqn{i} to any type) nearest neighbour distance distribution function of a multitype point process is the cumulative distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} of the distance from a typical random point of the process with type \eqn{i} the nearest other point of the process, regardless of type. An estimate of \eqn{G_{i\bullet}(r)}{Gi.(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the type \eqn{i} points were independent of all other points, then \eqn{G_{i\bullet}(r)}{Gi.(r)} would equal \eqn{G_{ii}(r)}{Gii(r)}, the nearest neighbour distance distribution function of the type \eqn{i} points alone. For a multitype Poisson point process with total intensity \eqn{\lambda}{lambda}, we have \deqn{G_{i\bullet}(r) = 1 - e^{ - \lambda \pi r^2} }{% Gi.(r) = 1 - exp( - lambda * pi * r^2)} Deviations between the empirical and theoretical \eqn{G_{i\bullet}}{Gi.} curves may suggest dependence of the type \eqn{i} points on the other points. This algorithm estimates the distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Gest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{i\bullet}(r)}{Gi.(r)} should be evaluated. 
It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{i\bullet}(r)}{Gi.(r)}. This estimate should be used with caution as \eqn{G_{i\bullet}(r)}{Gi.(r)} is not necessarily differentiable. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G_{i\bullet}}{Gi.}. However this is also returned by the algorithm, as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{G_{i\bullet}}{Gi.} as if it were an unbiased estimator of \eqn{G_{i\bullet}}{Gi.}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. The function \eqn{G_{i\bullet}}{Gi.} does not necessarily have a density. The reduced sample estimator of \eqn{G_{i\bullet}}{Gi.} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G_{i\bullet}}{Gi.} is always nondecreasing but its maximum value may be less than \eqn{1}. } \seealso{ \code{\link{Gcross}}, \code{\link{Gest}}, \code{\link{Gmulti}} } \examples{ # amacrine cells data G0. <- Gdot(amacrine, "off") plot(G0.) 
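## (added sketch, not in the original example)
## compare the Kaplan-Meier and border-corrected estimates with the
## theoretical Poisson curve, using the plot.fv formula interface
plot(G0., cbind(km, rs, theo) ~ r)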
# synthetic example pp <- runifpoispp(30) pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE)) G <- Gdot(pp, "0") G <- Gdot(pp, 0) # equivalent } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/pcfinhom.Rd0000644000176200001440000001723714700374645016463 0ustar liggesusers\name{pcfinhom} \alias{pcfinhom} \title{ Inhomogeneous Pair Correlation Function } \description{ Estimates the inhomogeneous pair correlation function of a point pattern using kernel methods. } \usage{ pcfinhom(X, lambda = NULL, ..., r = NULL, kernel = "epanechnikov", bw = NULL, adjust.bw=1, stoyan = 0.15, correction = c("translate", "Ripley"), divisor = c("r", "d"), renormalise = TRUE, normpower=1, update = TRUE, leaveoneout = TRUE, reciplambda = NULL, sigma = NULL, adjust.sigma = 1, varcov = NULL, close=NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for one-dimensional smoothing kernel, passed to \code{\link{density.default}}. Either a single numeric value, or a character string specifying a bandwidth selection rule recognised by \code{\link{density.default}}. If \code{bw} is missing or \code{NULL}, the default value is computed using Stoyan's rule of thumb: see \code{\link{bw.stoyan}}. } \item{adjust.bw}{ Numeric value. \code{bw} will be multiplied by this value. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Coefficient for Stoyan's bandwidth selection rule; see \code{\link{bw.stoyan}}. } \item{correction}{ Character string or character vector specifying the choice of edge correction. See \code{\link{Kest}} for explanation and options. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See \code{\link{pcf.ppp}}. } \item{renormalise}{ Logical. Whether to renormalise the estimate. See Details. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. } \item{update}{ Logical. If \code{lambda} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) and \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link[spatstat.model]{update.ppm}} or \code{\link[spatstat.model]{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link[spatstat.model]{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } \item{reciplambda}{ Alternative to \code{lambda}. Values of the estimated \emph{reciprocal} \eqn{1/\lambda}{1/lambda} of the intensity function. 
Either a vector giving the reciprocal intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the reciprocal intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the reciprocal intensity value at any location. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{adjust.sigma}{ Numeric value. \code{sigma} will be multiplied by this value. } \item{close}{ Advanced use only. Precomputed data. See section on Advanced Use. } } \details{ The inhomogeneous pair correlation function \eqn{g_{\rm inhom}(r)}{ginhom(r)} is a summary of the dependence between points in a spatial point process that does not have a uniform density of points. The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding two points at locations \eqn{x} and \eqn{y} separated by a distance \eqn{r} is equal to \deqn{ p(r) = \lambda(x) lambda(y) g(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda(x) * lambda(y) * g(r) dx dy } where \eqn{\lambda}{lambda} is the intensity function of the point process. For a Poisson point process with intensity function \eqn{\lambda}{lambda}, this probability is \eqn{p(r) = \lambda(x) \lambda(y)}{p(r) = lambda(x) * lambda(y)} so \eqn{g_{\rm inhom}(r) = 1}{ginhom(r) = 1}. The inhomogeneous pair correlation function is related to the inhomogeneous \eqn{K} function through \deqn{ g_{\rm inhom}(r) = \frac{K'_{\rm inhom}(r)}{2\pi r} }{ ginhom(r) = Kinhom'(r)/ ( 2 * pi * r) } where \eqn{K'_{\rm inhom}(r)}{Kinhom'(r)} is the derivative of \eqn{K_{\rm inhom}(r)}{Kinhom(r)}, the inhomogeneous \eqn{K} function. See \code{\link{Kinhom}} for information about \eqn{K_{\rm inhom}(r)}{Kinhom(r)}. The command \code{pcfinhom} estimates the inhomogeneous pair correlation using a modified version of the algorithm in \code{\link{pcf.ppp}}. If \code{renormalise=TRUE} (the default), then the estimates are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where \eqn{ c = \mbox{area}(W)/\sum (1/\lambda(x_i)). }{ c = area(W)/sum[i] (1/lambda(x[i])). } This rescaling reduces the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. The default value of \code{normpower} is 1 but the most sensible value is 2, which would correspond to rescaling the \code{lambda} values so that \eqn{ \sum (1/\lambda(x_i)) = \mbox{area}(W). }{ sum[i] (1/lambda(x[i])) = area(W). } } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{ the vector of values of the argument \eqn{r} at which the inhomogeneous pair correlation function \eqn{g_{\rm inhom}(r)}{ginhom(r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g_{\rm inhom}(r)}{ginhom(r)} for the Poisson process } \item{trans}{vector of values of \eqn{g_{\rm inhom}(r)}{ginhom(r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g_{\rm inhom}(r)}{ginhom(r)} estimated by Ripley isotropic correction } as required. } \section{Advanced Use}{ To perform the same computation using several different bandwidths \code{bw}, it is efficient to use the argument \code{close}. This should be the result of \code{\link[spatstat.geom]{closepairs}(X, rmax)} for a suitably large value of \code{rmax}, namely \code{rmax >= max(r) + 3 * bw}. 
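For example, a hedged sketch of this reuse (here \code{X} is assumed to be the point pattern of interest, and the distance grid, intensity bandwidth \code{sigma} and pair correlation bandwidths are purely illustrative):
\preformatted{
  r   <- seq(0, 0.25, length.out=129)          # distance grid
  bws <- c(0.02, 0.05)                         # bandwidths to compare
  cl  <- closepairs(X, max(r) + 3 * max(bws))  # precompute close pairs once
  g1  <- pcfinhom(X, sigma=0.1, r=r, bw=bws[1], close=cl)
  g2  <- pcfinhom(X, sigma=0.1, r=r, bw=bws[2], close=cl)
}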
} \seealso{ \code{\link{pcf}}, \code{\link{pcf.ppp}}, \code{\link{bw.stoyan}}, \code{\link{bw.pcf}}, \code{\link{Kinhom}} } \examples{ X <- residualspaper$Fig4b online <- interactive() if(!online) { ## reduce size of dataset X <- X[c(FALSE, TRUE)] } plot(pcfinhom(X, stoyan=0.2, sigma=0.1)) if(require("spatstat.model")) { if(online) { fit <- ppm(X ~ polynom(x,y,2)) } else { ## simpler model, faster computation fit <- ppm(X ~ x) } plot(pcfinhom(X, lambda=fit, normpower=2)) } } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/localKinhom.Rd0000644000176200001440000001332214611073324017076 0ustar liggesusers\name{localKinhom} \alias{localKinhom} \alias{localLinhom} \title{Inhomogeneous Neighbourhood Density Function} \description{ Computes spatially-weighted versions of the the local \eqn{K}-function or \eqn{L}-function. } \usage{ localKinhom(X, lambda, ..., rmax = NULL, correction = "Ripley", verbose = TRUE, rvalue=NULL, sigma = NULL, varcov = NULL, update=TRUE, leaveoneout=TRUE) localLinhom(X, lambda, ..., rmax = NULL, correction = "Ripley", verbose = TRUE, rvalue=NULL, sigma = NULL, varcov = NULL, update=TRUE, leaveoneout=TRUE) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Extra arguments. Ignored if \code{lambda} is present. Passed to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{correction}{ String specifying the edge correction to be applied. Options are \code{"none"}, \code{"translate"}, \code{"Ripley"}, \code{"translation"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{verbose}{Logical flag indicating whether to print progress reports during the calculation. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the function L or K should be computed. } \item{sigma, varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the kernel smoothing procedure for estimating \code{lambda}, if \code{lambda} is missing. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link[spatstat.model]{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link[spatstat.model]{update.ppm}} or \code{\link[spatstat.model]{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } } \details{ The functions \code{localKinhom} and \code{localLinhom} are inhomogeneous or weighted versions of the neighbourhood density function implemented in \code{\link{localK}} and \code{\link{localL}}. 
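For example (a hedged sketch; the dataset and bandwidth choices are purely illustrative), an intensity estimate can be computed first and passed explicitly as \code{lambda}:
\preformatted{
  X   <- ponderosa
  lam <- density(X, sigma=bw.diggle(X))   # pixel image of estimated intensity
  L   <- localLinhom(X, lambda=lam)
}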
Given a spatial point pattern \code{X}, the inhomogeneous neighbourhood density function \eqn{L_i(r)}{L[i](r)} associated with the \eqn{i}th point in \code{X} is computed by \deqn{ L_i(r) = \sqrt{\frac 1 \pi \sum_j \frac{e_{ij}}{\lambda_j}} }{ L[i](r) = sqrt( (1/pi) * sum[j] e[i,j]/lambda[j]) } where the sum is over all points \eqn{j \neq i}{j != i} that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{\lambda_j}{\lambda[j]} is the estimated intensity of the point pattern at the point \eqn{j}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The value of \eqn{L_i(r)}{L[i](r)} can also be interpreted as one of the summands that contributes to the global estimate of the inhomogeneous L function (see \code{\link{Linhom}}). By default, the function \eqn{L_i(r)}{L[i](r)} or \eqn{K_i(r)}{K[i](r)} is computed for a range of \eqn{r} values for each point \eqn{i}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X}. Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X}. } \value{ If \code{rvalue} is given, the result is a numeric vector of length equal to the number of points in the point pattern. If \code{rvalue} is absent, the result is an object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point. The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{Kinhom}}, \code{\link{Linhom}}, \code{\link{localK}}, \code{\link{localL}}. } \examples{ X <- ponderosa # compute all the local L functions L <- localLinhom(X) # plot all the local L functions against r plot(L, main="local L functions for ponderosa", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) # compute the values of L(r) for r = 12 metres L12 <- localL(X, rvalue=12) } \author{ Mike Kuhn, \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/spatstat.explore-package.Rd0000644000176200001440000006417314653632347021575 0ustar liggesusers\name{spatstat.explore-package} \alias{spatstat.explore-package} \alias{spatstat.explore} \docType{package} \title{The spatstat.explore Package} \description{ The \pkg{spatstat.explore} package belongs to the \pkg{spatstat} family of packages. It contains the core functionality for statistical analysis and modelling of spatial data. } \details{ \pkg{spatstat} is a family of \R packages for the statistical analysis of spatial data. Its main focus is the analysis of spatial patterns of points in two-dimensional space. The original \pkg{spatstat} package has now been split into several sub-packages. 
This sub-package \pkg{spatstat.explore} contains the user-level functions that perform exploratory data analysis and nonparametric data analysis of spatial data. (The main exception is that functions for linear networks are in the separate sub-package \pkg{spatstat.linnet}.) } \section{Structure of the spatstat family}{ The orginal \pkg{spatstat} package grew to be very large. It has now been divided into several \bold{sub-packages}: \itemize{ \item \pkg{spatstat.utils} containing basic utilities \item \pkg{spatstat.sparse} containing linear algebra utilities \item \pkg{spatstat.data} containing datasets \item \pkg{spatstat.univar} containing functions for estimating probability distributions of random variables \item \pkg{spatstat.geom} containing geometrical objects and geometrical operations \item \pkg{spatstat.explore} containing the functionality for exploratory data analysis and nonparametric analysis of spatial data. \item \pkg{spatstat.model} containing the functionality for statistical modelling, model-fitting, formal statistical inference and informal model diagnostics. \item \pkg{spatstat.linnet} containing functions for spatial data on a linear network \item \pkg{spatstat}, which simply loads the other sub-packages listed above, and provides documentation. } When you install \pkg{spatstat}, these sub-packages are also installed. Then if you load the \pkg{spatstat} package by typing \code{library(spatstat)}, the other sub-packages listed above will automatically be loaded or imported. For an overview of all the functions available in the sub-packages of \pkg{spatstat}, see the help file for \code{"spatstat-package"} in the \pkg{spatstat} package. Additionally there are several \bold{extension packages:} \itemize{ \item \pkg{spatstat.gui} for interactive graphics \item \pkg{spatstat.local} for local likelihood (including geographically weighted regression) \item \pkg{spatstat.Knet} for additional, computationally efficient code for linear networks \item \pkg{spatstat.sphere} (under development) for spatial data on a sphere, including spatial data on the earth's surface } The extension packages must be installed separately and loaded explicitly if needed. They also have separate documentation. } \section{Overview of Functionality in \pkg{spatstat.explore}}{ The \pkg{spatstat} family of packages is designed to support a complete statistical analysis of spatial data. It supports \itemize{ \item creation, manipulation and plotting of point patterns; \item exploratory data analysis; \item spatial random sampling; \item simulation of point process models; \item parametric model-fitting; \item non-parametric smoothing and regression; \item formal inference (hypothesis tests, confidence intervals); \item model diagnostics. } For an overview, see the help file for \code{"spatstat-package"} in the \pkg{spatstat} package. Following is a list of the functionality provided in the \pkg{spatstat.explore} package only. \bold{To simulate a random point pattern:} Functions for generating random point patterns are now contained in the \pkg{spatstat.random} package. 
\bold{To interrogate a point pattern:} \tabular{ll}{ \code{\link[spatstat.explore]{density.ppp}} \tab kernel estimation of point pattern intensity\cr \code{\link[spatstat.explore]{densityHeat.ppp}} \tab diffusion kernel estimation of point pattern intensity\cr \code{\link[spatstat.explore]{Smooth.ppp}} \tab kernel smoothing of marks of point pattern\cr \code{\link[spatstat.explore]{sharpen.ppp}} \tab data sharpening\cr } \bold{Manipulation of pixel images:} An object of class \code{"im"} represents a pixel image. \tabular{ll}{ \code{\link[spatstat.explore]{blur}} \tab apply Gaussian blur to image\cr \code{\link[spatstat.explore]{Smooth.im}} \tab apply Gaussian blur to image\cr \code{\link[spatstat.explore]{transect.im}} \tab line transect of image \cr \code{\link[spatstat.geom]{pixelcentres}} \tab extract centres of pixels \cr \code{\link[spatstat.random]{rnoise}} \tab random pixel noise } \bold{Line segment patterns} An object of class \code{"psp"} represents a pattern of straight line segments. \tabular{ll}{ \code{\link[spatstat.explore]{density.psp}} \tab kernel smoothing of line segments\cr \code{\link[spatstat.random]{rpoisline}} \tab generate a realisation of the Poisson line process inside a window } \bold{Tessellations} An object of class \code{"tess"} represents a tessellation. \tabular{ll}{ \code{\link[spatstat.random]{rpoislinetess}} \tab generate tessellation using Poisson line process } \bold{Three-dimensional point patterns} An object of class \code{"pp3"} represents a three-dimensional point pattern in a rectangular box. The box is represented by an object of class \code{"box3"}. \tabular{ll}{ \code{\link[spatstat.random]{runifpoint3}} \tab generate uniform random points in 3-D \cr \code{\link[spatstat.random]{rpoispp3}} \tab generate Poisson random points in 3-D \cr \code{\link[spatstat.explore]{envelope.pp3}} \tab generate simulation envelopes for 3-D pattern \cr } \bold{Multi-dimensional space-time point patterns} An object of class \code{"ppx"} represents a point pattern in multi-dimensional space and/or time. 
\tabular{ll}{ \code{\link[spatstat.random]{runifpointx}} \tab generate uniform random points \cr \code{\link[spatstat.random]{rpoisppx}} \tab generate Poisson random points } \bold{Classical exploratory tools:} \tabular{ll}{ \code{\link[spatstat.explore]{clarkevans}} \tab Clark and Evans aggregation index \cr \code{\link[spatstat.explore]{fryplot}} \tab Fry plot \cr \code{\link[spatstat.explore]{miplot}} \tab Morisita Index plot } \bold{Smoothing:} \tabular{ll}{ \code{\link[spatstat.explore]{density.ppp}} \tab kernel smoothed density/intensity\cr \code{\link[spatstat.explore]{relrisk}} \tab kernel estimate of relative risk\cr \code{\link[spatstat.explore]{Smooth.ppp}} \tab spatial interpolation of marks \cr \code{\link[spatstat.explore]{bw.diggle}} \tab cross-validated bandwidth selection for \code{\link[spatstat.explore]{density.ppp}}\cr \code{\link[spatstat.explore]{bw.ppl}} \tab likelihood cross-validated bandwidth selection for \code{\link[spatstat.explore]{density.ppp}}\cr \code{\link[spatstat.explore]{bw.CvL}} \tab Cronie-Van Lieshout bandwidth selection for density estimation\cr \code{\link[spatstat.explore]{bw.scott}} \tab Scott's rule of thumb for density estimation\cr \code{\link[spatstat.explore]{bw.abram.ppp}} \tab Abramson's rule for adaptive bandwidths\cr \code{\link[spatstat.explore]{bw.relrisk}} \tab cross-validated bandwidth selection for \code{\link[spatstat.explore]{relrisk}} \cr \code{\link[spatstat.explore]{bw.smoothppp}} \tab cross-validated bandwidth selection for \code{\link[spatstat.explore]{Smooth.ppp}} \cr \code{\link[spatstat.explore]{bw.frac}} \tab bandwidth selection using window geometry\cr \code{\link[spatstat.explore]{bw.stoyan}} \tab Stoyan's rule of thumb for bandwidth for \code{\link[spatstat.explore]{pcf}} } \bold{Modern exploratory tools:} \tabular{ll}{ \code{\link[spatstat.explore]{clusterset}} \tab Allard-Fraley feature detection \cr \code{\link[spatstat.explore]{nnclean}} \tab Byers-Raftery feature detection \cr \code{\link[spatstat.explore]{sharpen.ppp}} \tab Choi-Hall data sharpening \cr \code{\link[spatstat.explore]{rhohat}} \tab Kernel estimate of covariate effect\cr \code{\link[spatstat.explore]{rho2hat}} \tab Kernel estimate of effect of two covariates\cr \code{\link[spatstat.explore]{spatialcdf}} \tab Spatial cumulative distribution function\cr \code{\link[spatstat.explore]{roc}} \tab Receiver operating characteristic curve \cr \code{\link[spatstat.explore]{sdr}} \tab Sufficient Data Reduction \cr \code{\link[spatstat.explore]{thresholdSelect}} \tab optimal thresholding of a predictor } \bold{Summary statistics for a point pattern:} \tabular{ll}{ \code{\link[spatstat.explore]{Fest}} \tab empty space function \eqn{F} \cr \code{\link[spatstat.explore]{Gest}} \tab nearest neighbour distribution function \eqn{G} \cr \code{\link[spatstat.explore]{Jest}} \tab \eqn{J}-function \eqn{J = (1-G)/(1-F)} \cr \code{\link[spatstat.explore]{Kest}} \tab Ripley's \eqn{K}-function\cr \code{\link[spatstat.explore]{Lest}} \tab Besag \eqn{L}-function\cr \code{\link[spatstat.explore]{Tstat}} \tab Third order \eqn{T}-function \cr \code{\link[spatstat.explore]{allstats}} \tab all four functions \eqn{F}, \eqn{G}, \eqn{J}, \eqn{K} \cr \code{\link[spatstat.explore]{pcf}} \tab pair correlation function \cr \code{\link[spatstat.explore]{Kinhom}} \tab \eqn{K} for inhomogeneous point patterns \cr \code{\link[spatstat.explore]{Linhom}} \tab \eqn{L} for inhomogeneous point patterns \cr \code{\link[spatstat.explore]{pcfinhom}} \tab pair correlation for inhomogeneous 
patterns\cr \code{\link[spatstat.explore]{Finhom}} \tab \eqn{F} for inhomogeneous point patterns \cr \code{\link[spatstat.explore]{Ginhom}} \tab \eqn{G} for inhomogeneous point patterns \cr \code{\link[spatstat.explore]{Jinhom}} \tab \eqn{J} for inhomogeneous point patterns \cr \code{\link[spatstat.explore]{localL}} \tab Getis-Franklin neighbourhood density function\cr \code{\link[spatstat.explore]{localK}} \tab neighbourhood K-function\cr \code{\link[spatstat.explore]{localpcf}} \tab local pair correlation function\cr \code{\link[spatstat.explore]{localKinhom}} \tab local \eqn{K} for inhomogeneous point patterns \cr \code{\link[spatstat.explore]{localLinhom}} \tab local \eqn{L} for inhomogeneous point patterns \cr \code{\link[spatstat.explore]{localpcfinhom}} \tab local pair correlation for inhomogeneous patterns\cr \code{\link[spatstat.explore]{Ksector}} \tab Directional \eqn{K}-function\cr \code{\link[spatstat.explore]{Kscaled}} \tab locally scaled \eqn{K}-function \cr \code{\link[spatstat.explore]{Kest.fft}} \tab fast \eqn{K}-function using FFT for large datasets \cr \code{\link[spatstat.explore]{Kmeasure}} \tab reduced second moment measure \cr \code{\link[spatstat.explore]{envelope}} \tab simulation envelopes for a summary function \cr \code{\link[spatstat.explore]{varblock}} \tab variances and confidence intervals\cr \tab for a summary function \cr \code{\link[spatstat.explore]{lohboot}} \tab bootstrap for a summary function } Related facilities: \tabular{ll}{ \code{\link[spatstat.explore]{plot.fv}} \tab plot a summary function\cr \code{\link[spatstat.explore]{eval.fv}} \tab evaluate any expression involving summary functions\cr \code{\link[spatstat.explore]{harmonise.fv}} \tab make functions compatible \cr \code{\link[spatstat.explore]{eval.fasp}} \tab evaluate any expression involving an array of functions\cr \code{\link[spatstat.explore]{with.fv}} \tab evaluate an expression for a summary function\cr \code{\link[spatstat.explore]{Smooth.fv}} \tab apply smoothing to a summary function\cr \code{\link[spatstat.explore]{deriv.fv}} \tab calculate derivative of a summary function\cr \code{\link[spatstat.explore]{pool.fv}} \tab pool several estimates of a summary function\cr \code{\link[spatstat.explore]{density.ppp}} \tab kernel smoothed density\cr \code{\link[spatstat.explore]{densityHeat.ppp}} \tab diffusion kernel smoothed density\cr \code{\link[spatstat.explore]{Smooth.ppp}} \tab spatial interpolation of marks \cr \code{\link[spatstat.explore]{relrisk}} \tab kernel estimate of relative risk\cr \code{\link[spatstat.explore]{sharpen.ppp}} \tab data sharpening \cr \code{\link[spatstat.random]{rknn}} \tab theoretical distribution of nearest neighbour distance } \bold{Summary statistics for a multitype point pattern:} A multitype point pattern is represented by an object \code{X} of class \code{"ppp"} such that \code{marks(X)} is a factor. 
\tabular{ll}{ \code{\link[spatstat.explore]{relrisk}} \tab kernel estimation of relative risk \cr \code{\link[spatstat.explore]{scan.test}} \tab spatial scan test of elevated risk \cr \code{\link[spatstat.explore]{Gcross},\link[spatstat.explore]{Gdot},\link[spatstat.explore]{Gmulti}} \tab multitype nearest neighbour distributions \eqn{G_{ij}, G_{i\bullet}}{G[i,j], G[i.]} \cr \code{\link[spatstat.explore]{Kcross},\link[spatstat.explore]{Kdot}, \link[spatstat.explore]{Kmulti}} \tab multitype \eqn{K}-functions \eqn{K_{ij}, K_{i\bullet}}{K[i,j], K[i.]} \cr \code{\link[spatstat.explore]{Lcross},\link[spatstat.explore]{Ldot}} \tab multitype \eqn{L}-functions \eqn{L_{ij}, L_{i\bullet}}{L[i,j], L[i.]} \cr \code{\link[spatstat.explore]{Jcross},\link[spatstat.explore]{Jdot},\link[spatstat.explore]{Jmulti}} \tab multitype \eqn{J}-functions \eqn{J_{ij}, J_{i\bullet}}{J[i,j],J[i.]} \cr \code{\link[spatstat.explore]{pcfcross}} \tab multitype pair correlation function \eqn{g_{ij}}{g[i,j]} \cr \code{\link[spatstat.explore]{pcfdot}} \tab multitype pair correlation function \eqn{g_{i\bullet}}{g[i.]} \cr \code{\link[spatstat.explore]{pcfmulti}} \tab general pair correlation function \cr \code{\link[spatstat.explore]{markconnect}} \tab marked connection function \eqn{p_{ij}}{p[i,j]} \cr \code{\link[spatstat.explore]{alltypes}} \tab estimates of the above for all \eqn{i,j} pairs \cr \code{\link[spatstat.explore]{Iest}} \tab multitype \eqn{I}-function\cr \code{\link[spatstat.explore]{Kcross.inhom},\link[spatstat.explore]{Kdot.inhom}} \tab inhomogeneous counterparts of \code{Kcross}, \code{Kdot} \cr \code{\link[spatstat.explore]{Lcross.inhom},\link[spatstat.explore]{Ldot.inhom}} \tab inhomogeneous counterparts of \code{Lcross}, \code{Ldot} \cr \code{\link[spatstat.explore]{pcfcross.inhom},\link[spatstat.explore]{pcfdot.inhom}} \tab inhomogeneous counterparts of \code{pcfcross}, \code{pcfdot} \cr \code{\link[spatstat.explore]{localKcross},\link[spatstat.explore]{localKdot}} \tab local counterparts of \code{Kcross}, \code{Kdot} \cr \code{\link[spatstat.explore]{localLcross},\link[spatstat.explore]{localLdot}} \tab local counterparts of \code{Lcross}, \code{Ldot} \cr \code{\link[spatstat.explore]{localKcross.inhom},\link[spatstat.explore]{localLcross.inhom}} \tab local counterparts of \code{Kcross.inhom}, \code{Lcross.inhom} } \bold{Summary statistics for a marked point pattern:} A marked point pattern is represented by an object \code{X} of class \code{"ppp"} with a component \code{X$marks}. The entries in the vector \code{X$marks} may be numeric, complex, string or any other atomic type. 
For numeric marks, there are the following functions: \tabular{ll}{ \code{\link[spatstat.explore]{markmean}} \tab smoothed local average of marks \cr \code{\link[spatstat.explore]{markvar}} \tab smoothed local variance of marks \cr \code{\link[spatstat.explore]{markcorr}} \tab mark correlation function \cr \code{\link[spatstat.explore]{markcrosscorr}} \tab mark cross-correlation function \cr \code{\link[spatstat.explore]{markvario}} \tab mark variogram \cr \code{\link[spatstat.explore]{markmarkscatter}} \tab mark-mark scatterplot \cr \code{\link[spatstat.explore]{Kmark}} \tab mark-weighted \eqn{K} function \cr \code{\link[spatstat.explore]{Emark}} \tab mark independence diagnostic \eqn{E(r)} \cr \code{\link[spatstat.explore]{Vmark}} \tab mark independence diagnostic \eqn{V(r)} \cr \code{\link[spatstat.explore]{nnmean}} \tab nearest neighbour mean index \cr \code{\link[spatstat.explore]{nnvario}} \tab nearest neighbour mark variance index } For marks of any type, there are the following: \tabular{ll}{ \code{\link[spatstat.explore]{Gmulti}} \tab multitype nearest neighbour distribution \cr \code{\link[spatstat.explore]{Kmulti}} \tab multitype \eqn{K}-function \cr \code{\link[spatstat.explore]{Jmulti}} \tab multitype \eqn{J}-function } Alternatively use \code{\link[spatstat.geom]{cut.ppp}} to convert a marked point pattern to a multitype point pattern. \bold{Programming tools:} \tabular{ll}{ \code{\link[spatstat.explore]{marktable}} \tab tabulate the marks of neighbours in a point pattern } \bold{Summary statistics for a three-dimensional point pattern:} These are for 3-dimensional point pattern objects (class \code{pp3}). \tabular{ll}{ \code{\link[spatstat.explore]{F3est}} \tab empty space function \eqn{F} \cr \code{\link[spatstat.explore]{G3est}} \tab nearest neighbour function \eqn{G} \cr \code{\link[spatstat.explore]{K3est}} \tab \eqn{K}-function \cr \code{\link[spatstat.explore]{pcf3est}} \tab pair correlation function } Related facilities: \tabular{ll}{ \code{\link[spatstat.explore]{envelope.pp3}} \tab simulation envelopes } \bold{Summary statistics for random sets:} These work for point patterns (class \code{ppp}), line segment patterns (class \code{psp}) or windows (class \code{owin}). \tabular{ll}{ \code{\link[spatstat.explore]{Hest}} \tab spherical contact distribution \eqn{H} \cr \code{\link[spatstat.explore]{Gfox}} \tab Foxall \eqn{G}-function \cr \code{\link[spatstat.explore]{Jfox}} \tab Foxall \eqn{J}-function } \bold{Model fitting} Functions for fitting point process models are now contained in the \pkg{spatstat.model} package. \bold{Simulation} There are many ways to generate a random point pattern, line segment pattern, pixel image or tessellation in \pkg{spatstat}. \bold{Random point patterns:} Functions for random generation are now contained in the \pkg{spatstat.random} package. See also \code{\link[spatstat.explore]{varblock}} for estimating the variance of a summary statistic by block resampling, and \code{\link[spatstat.explore]{lohboot}} for another bootstrap technique. \bold{Fitted point process models:} If you have fitted a point process model to a point pattern dataset, the fitted model can be simulated. Methods for simulating a fitted model are now contained in the \pkg{spatstat.model} package. \bold{Other random patterns:} Functions for random generation are now contained in the \pkg{spatstat.random} package. 
\bold{Simulation-based inference} \tabular{ll}{ \code{\link[spatstat.explore]{envelope}} \tab critical envelope for Monte Carlo test of goodness-of-fit \cr \code{\link[spatstat.explore]{bits.envelope}} \tab critical envelope for balanced two-stage Monte Carlo test \cr \code{\link[spatstat.model]{qqplot.ppm}} \tab diagnostic plot for interpoint interaction \cr \code{\link[spatstat.explore]{scan.test}} \tab spatial scan statistic/test \cr \code{\link[spatstat.explore]{studpermu.test}} \tab studentised permutation test\cr \code{\link[spatstat.explore]{segregation.test}} \tab test of segregation of types } \bold{Hypothesis tests:} \tabular{ll}{ \code{\link[spatstat.explore]{quadrat.test}} \tab \eqn{\chi^2}{chi^2} goodness-of-fit test on quadrat counts \cr \code{\link[spatstat.explore]{clarkevans.test}} \tab Clark and Evans test \cr \code{\link[spatstat.explore]{cdf.test}} \tab Spatial distribution goodness-of-fit test\cr \code{\link[spatstat.explore]{berman.test}} \tab Berman's goodness-of-fit tests\cr \code{\link[spatstat.explore]{envelope}} \tab critical envelope for Monte Carlo test of goodness-of-fit \cr \code{\link[spatstat.explore]{scan.test}} \tab spatial scan statistic/test \cr \code{\link[spatstat.explore]{dclf.test}} \tab Diggle-Cressie-Loosmore-Ford test \cr \code{\link[spatstat.explore]{mad.test}} \tab Mean Absolute Deviation test \cr \code{\link[spatstat.model]{anova.ppm}} \tab Analysis of Deviance for point process models } \bold{More recently-developed tests:} \tabular{ll}{ \code{\link[spatstat.explore]{dg.test}} \tab Dao-Genton test \cr \code{\link[spatstat.explore]{bits.test}} \tab Balanced independent two-stage test \cr \code{\link[spatstat.explore]{dclf.progress}} \tab Progress plot for DCLF test \cr \code{\link[spatstat.explore]{mad.progress}} \tab Progress plot for MAD test \cr } \bold{Model diagnostics:} Classical measures of model sensitivity such as leverage and influence, and classical model diagnostic tools such as residuals, partial residuals, and effect estimates, have been adapted to point process models. These capabilities are now provided in the \pkg{spatstat.model} package. \bold{Resampling and randomisation procedures} You can build your own tests based on randomisation and resampling using the following capabilities: \tabular{ll}{ \code{\link[spatstat.random]{quadratresample}} \tab block resampling \cr \code{\link[spatstat.random]{rshift}} \tab random shifting of (subsets of) points\cr \code{\link[spatstat.random]{rthin}} \tab random thinning } } \section{Licence}{ This library and its documentation are usable under the terms of the "GNU General Public License", a copy of which is distributed with the package. } \author{ \spatstatAuthors. } \section{Acknowledgements}{ Kasper Klitgaard Berthelsen, Ottmar Cronie, Tilman Davies, Julian Gilbey, Yongtao Guan, Ute Hahn, Kassel Hingee, Abdollah Jalilian, Marie-Colette van Lieshout, Greg McSwiggan, Tuomas Rajala, Suman Rakshit, Dominic Schuhmacher, Rasmus Waagepetersen and Hangsheng Wang made substantial contributions of code. For comments, corrections, bug alerts and suggestions, we thank Monsuru Adepeju, Corey Anderson, Ang Qi Wei, Ryan Arellano, Jens \ifelse{latex}{\out{{\AA}str{\" o}m}}{Astrom}, Robert Aue, Marcel Austenfeld, Sandro Azaele, Malissa Baddeley, Guy Bayegnak, Colin Beale, Melanie Bell, Thomas Bendtsen, Ricardo Bernhardt, Andrew Bevan, Brad Biggerstaff, Anders Bilgrau, Leanne Bischof, Christophe Biscio, Roger Bivand, Jose M. 
Blanco Moreno, Florent Bonneu, Jordan Brown, Ian Buller, Julian Burgos, Simon Byers, Ya-Mei Chang, Jianbao Chen, Igor Chernayavsky, Y.C. Chin, Bjarke Christensen, \ifelse{latex}{\out{Luc\'{\i{}}a}}{Lucia} Cobo Sanchez, Jean-Francois Coeurjolly, Kim Colyvas, Hadrien Commenges, Rochelle Constantine, Robin Corria Ainslie, Richard Cotton, Marcelino de la Cruz, Peter Dalgaard, Mario D'Antuono, Sourav Das, Peter Diggle, Patrick Donnelly, Ian Dryden, Stephen Eglen, Ahmed El-Gabbas, Belarmain Fandohan, Olivier Flores, David Ford, Peter Forbes, Shane Frank, Janet Franklin, Funwi-Gabga Neba, Oscar Garcia, Agnes Gault, Jonas Geldmann, Marc Genton, Shaaban Ghalandarayeshi, Jason Goldstick, Pavel Grabarnik, C. Graf, Ute Hahn, Andrew Hardegen, Martin \Bogsted Hansen, Martin Hazelton, Juha Heikkinen, Mandy Hering, Markus Herrmann, Maximilian Hesselbarth, Paul Hewson, Hamidreza Heydarian, Kurt Hornik, Philipp Hunziker, Jack Hywood, Ross Ihaka, \ifelse{latex}{\out{\u{C}enk I\c{c}\"{o}s}}{Cenk Icos}, Aruna Jammalamadaka, Robert John-Chandran, Devin Johnson, Mahdieh Khanmohammadi, Bob Klaver, Lily Kozmian-Ledward, Peter Kovesi, Mike Kuhn, Jeff Laake, Robert Lamb, \ifelse{latex}{\out{Fr\'{e}d\'{e}ric}}{Frederic} Lavancier, Tom Lawrence, Tomas Lazauskas, Jonathan Lee, George Leser, Angela Li, Li Haitao, George Limitsios, Andrew Lister, Nestor Luambua, Ben Madin, Martin Maechler, Kiran Marchikanti, Jeff Marcus, Robert Mark, Peter McCullagh, Monia Mahling, Jorge Mateu Mahiques, Ulf Mehlig, Frederico Mestre, Sebastian Wastl Meyer, Mi Xiangcheng, Lore De Middeleer, Robin Milne, Enrique Miranda, Jesper \Moller, Annie \ifelse{latex}{\out{Molli{\'e}}}{Mollie}, Ines Moncada, Mehdi Moradi, Virginia Morera Pujol, Erika Mudrak, Gopalan Nair, Nader Najari, Nicoletta Nava, Linda Stougaard Nielsen, Felipe Nunes, Jens Randel Nyengaard, Jens \Oehlschlaegel, Thierry Onkelinx, Sean O'Riordan, Evgeni Parilov, Jeff Picka, Nicolas Picard, Tim Pollington, Mike Porter, Sergiy Protsiv, Adrian Raftery, Ben Ramage, Pablo Ramon, Xavier Raynaud, Nicholas Read, Matt Reiter, Ian Renner, Tom Richardson, Brian Ripley, Ted Rosenbaum, Barry Rowlingson, Jason Rudokas, Tyler Rudolph, John Rudge, Christopher Ryan, Farzaneh Safavimanesh, Aila \Sarkka, Cody Schank, Katja Schladitz, Sebastian Schutte, Bryan Scott, Olivia Semboli, \ifelse{latex}{\out{Fran\c{c}ois S\'{e}m\'{e}curbe}}{Francois Semecurbe}, Vadim Shcherbakov, Shen Guochun, Shi Peijian, Harold-Jeffrey Ship, Tammy L Silva, Ida-Maria Sintorn, Yong Song, Malte Spiess, Mark Stevenson, Kaspar Stucki, Jan Sulavik, Michael Sumner, P. Surovy, Ben Taylor, Thordis Linda Thorarinsdottir, Leigh Torres, Berwin Turlach, Torben Tvedebrink, Kevin Ummer, Medha Uppala, Andrew van Burgel, Tobias Verbeke, Mikko Vihtakari, Alexendre Villers, Fabrice Vinatier, Maximilian Vogtland, Sasha Voss, Sven Wagner, Hao Wang, H. Wendrock, Jan Wild, Carl G. Witthoft, Selene Wong, Maxime Woringer, Luke Yates, Mike Zamboni and Achim Zeileis. } \keyword{spatial} \keyword{package} spatstat.explore/man/markmarkscatter.Rd0000644000176200001440000000416614643125461020044 0ustar liggesusers\name{markmarkscatter} \alias{markmarkscatter} \title{ Mark-Mark Scatter Plot } \description{ Generates the mark-mark scatter plot of a point pattern. } \usage{ markmarkscatter(X, rmax, \dots, col = NULL, symap = NULL, transform=I, jit=FALSE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}, \code{"pp3"}, \code{"lpp"} or \code{"ppx"}) with numeric marks. 
} \item{rmax}{ Maximum distance between pairs of points which contribute to the plot. } \item{\dots}{ Additional arguments passed to \code{\link[spatstat.geom]{plot.ppp}} to control the scatterplot. } \item{transform}{ Optional. A function which should be applied to the mark values. } \item{jit}{ Logical value indicating whether mark values should be randomly perturbed using \code{\link[base]{jitter}}. } \item{col}{ Optional. A vector of colour values, or a \code{\link[spatstat.geom]{colourmap}} to be used to portray the pairwise distance values. Ignored if \code{symap} is given. } \item{symap}{ Optional. A \code{\link[spatstat.geom]{symbolmap}} to be used to portray the pairwise distance values. Overrides \code{col}. } } \details{ The mark-mark scatter plot (Ballani et al, 2019) is a scatterplot of the mark values of all pairs of distinct points in \code{X} which are closer than the distance \code{rmax}. The dots in the scatterplot are coloured according to the pairwise distance between the two spatial points. The plot is augmented by three curves explained by Ballani et al (2019). If the marks only take a few different values, then it is usually appropriate to apply random perturbation (jitter) to the mark values, by setting \code{jit=TRUE}. } \value{ Null. } \references{ Ballani, F., Pommerening, A. and Stoyan, D. (2019) Mark-mark scatterplots improve pattern analysis in spatial plant ecology. \emph{Ecological Informatics} \bold{49}, 13--21. } \author{ Adrian Baddeley (coded from the description in Ballani et al.) } \examples{ markmarkscatter(longleaf, 10) markmarkscatter(spruces, 10, jit=TRUE) } \keyword{spatial} \keyword{hplot} spatstat.explore/man/densityHeat.ppp.Rd0000644000176200001440000001747114611073322017724 0ustar liggesusers\name{densityHeat.ppp} \alias{densityHeat.ppp} \title{ Diffusion Estimate of Point Pattern Intensity } \description{ Computes the diffusion estimate of the intensity of a point pattern. } \usage{ \method{densityHeat}{ppp}(x, sigma, \dots, weights=NULL, connect=8, symmetric=FALSE, sigmaX=NULL, k=1, show=FALSE, se=FALSE, at=c("pixels", "points"), leaveoneout = TRUE, extrapolate = FALSE, coarsen = TRUE, verbose=TRUE, internal=NULL) } \arguments{ \item{x}{ Point pattern (object of class \code{"ppp"}). } \item{sigma}{ Smoothing bandwidth. A single number giving the equivalent standard deviation of the smoother. Alternatively, a pixel image (class \code{"im"}) or a \code{function(x,y)} giving the spatially-varying bandwidth. } \item{\dots}{ Arguments passed to \code{\link[spatstat.geom]{pixellate.ppp}} controlling the pixel resolution. } \item{weights}{ Optional numeric vector of weights associated with each point of \code{x}. } \item{connect}{ Grid connectivity: either 4 or 8. } \item{symmetric}{ Logical value indicating whether to \emph{force} the algorithm to use a symmetric random walk. } \item{sigmaX}{ Numeric vector of bandwidths, one associated with each data point in \code{x}. See Details. } \item{k}{ Integer. Calculations will be performed by repeatedly multiplying the current state by the \code{k}-step transition matrix. } \item{show}{ Logical value indicating whether to plot successive iterations. } \item{se}{ Logical value indicating whether to compute standard errors. } \item{at}{ Character string specifying whether to compute values at a grid of pixels (\code{at="pixels"}, the default) or at the data points of \code{x} (\code{at="points"}). 
} \item{leaveoneout}{ Logical value specifying whether to compute a leave-one-out estimate at each data point, when \code{at="points"}. } \item{extrapolate}{ Logical value specifying whether to use Richardson extrapolation to improve the accuracy of the computation. } \item{coarsen}{ Logical value, controlling the calculation performed when \code{extrapolate=TRUE}. See Details. } \item{verbose}{ Logical value specifying whether to print progress reports. } \item{internal}{ Developer use only. } } \details{ This command computes a diffusion kernel estimate of point process intensity from the observed point pattern \code{x}. The function \code{\link{densityHeat}} is generic, with methods for point patterns in two dimensions (class \code{"ppp"}) and point patterns on a linear network (class \code{"lpp"}). The function \code{densityHeat.ppp} described here is the method for class \code{"ppp"}. Given a two-dimensional point pattern \code{x}, it computes a diffusion kernel estimate of the intensity of the point process which generated \code{x}. Diffusion kernel estimates were developed by Botev et al (2010), Barry and McIntyre (2011) and Baddeley et al (2022). Barry and McIntyre (2011) proposed an estimator for point process intensity based on a random walk on the pixel grid inside the observation window. Baddeley et al (2022) showed that the Barry-McIntyre method is a special case of the \emph{diffusion estimator} proposed by Botev et al (2010). The original Barry-McIntyre algorithm assumes a symmetric random walk (i.e. each possible transition has the same probability \eqn{p}) and requires a square pixel grid (i.e. equal spacing in the \eqn{x} and \eqn{y} directions). Their original algorithm is used if \code{symmetric=TRUE}. Use the \code{\dots} arguments to ensure a square grid: for example, the argument \code{eps} specifies a square grid with spacing \code{eps} units. The more general algorithm used here (Baddeley et al, 2022) does not require a square grid of pixels. If the pixel grid is not square, and if \code{symmetric=FALSE} (the default), then the random walk is not symmetric, in the sense that the probabilities of different jumps will be different, in order to ensure that the smoothing is isotropic. This implementation also includes two generalizations to the case of adaptive smoothing (Baddeley et al, 2022). In the first version of adaptive smoothing, the bandwidth is spatially-varying. The argument \code{sigma} should be a pixel image (class \code{"im"}) or a \code{function(x,y)} specifying the bandwidth at each spatial location. The smoothing is performed by solving the heat equation with spatially-varying parameters. In the second version of adaptive smoothing, each data point in \code{x} is smoothed using a separate bandwidth. The argument \code{sigmaX} should be a numeric vector specifying the bandwidth for each point of \code{x}. The smoothing is performed using the lagged arrival algorithm. The argument \code{sigma} can be omitted. If \code{extrapolate=FALSE} (the default), calculations are performed using the Euler scheme for the heat equation. If \code{extrapolate=TRUE}, the accuracy of the result will be improved by applying Richardson extrapolation (Baddeley et al, 2022, Section 4). After computing the intensity estimate using the Euler scheme on the desired pixel grid, another estimate is computed using the same method on another pixel grid, and the two estimates are combined by Richardson extrapolation to obtain a more accurate result. 
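As a minimal sketch (mirroring the examples below, with an arbitrary
bandwidth), the estimate with and without extrapolation might be computed
as follows.
\preformatted{
  X  <- runifpoint(25, letterR)
  Z  <- densityHeat(X, sigma=0.2)                    # Euler scheme only
  Ze <- densityHeat(X, sigma=0.2, extrapolate=TRUE)  # with Richardson extrapolation
}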
The second grid is coarser than the original grid if \code{coarsen=TRUE} (the default), and finer than the original grid if \code{coarsen=FALSE}. Setting \code{extrapolate=TRUE} increases computation time by 35\% if \code{coarsen=TRUE} and by 400\% if \code{coarsen=FALSE}. } \value{ Pixel image (object of class \code{"im"}) giving the estimated intensity of the point process. If \code{se=TRUE}, the result has an attribute \code{"se"} which is another pixel image giving the estimated standard error. If \code{at="points"} then the result is a numeric vector with one entry for each point of \code{x}. } \seealso{ \code{\link[spatstat.explore]{density.ppp}} for the usual kernel estimator, and \code{\link[spatstat.explore]{adaptive.density}} for the tessellation-based estimator. } \references{ Baddeley, A., Davies, T., Rakshit, S., Nair, G. and McSwiggan, G. (2022) Diffusion smoothing for spatial point patterns. \emph{Statistical Science} \bold{37} (1) 123--142. Barry, R.P. and McIntyre, J. (2011) Estimating animal densities and home range in regions with irregular boundaries and holes: a lattice-based alternative to the kernel density estimator. \emph{Ecological Modelling} \bold{222}, 1666--1672. Botev, Z.I., Grotowski, J.F. and Kroese, D.P. (2010) Kernel density estimation via diffusion. \emph{Annals of Statistics} \bold{38}, 2916--2957. } \author{ Adrian Baddeley and Tilman Davies. } \examples{ online <- interactive() if(!online) op <- spatstat.options(npixel=32) X <- runifpoint(25, letterR) Z <- densityHeat(X, 0.2) if(online) { plot(Z, main="Diffusion estimator") plot(X, add=TRUE, pch=16) integral(Z) # should equal 25 } Z <- densityHeat(X, 0.2, se=TRUE) Zse <- attr(Z, "se") if(online) plot(solist(estimate=Z, SE=Zse), main="") Zex <- densityHeat(X, 0.2, extrapolate=TRUE) ZS <- densityHeat(X, 0.2, symmetric=TRUE, eps=0.125) if(online) { plot(ZS, main="fixed bandwidth") plot(X, add=TRUE, pch=16) } sig <- function(x,y) { (x-1.5)/10 } ZZ <- densityHeat(X, sig) if(online) { plot(ZZ, main="adaptive (I)") plot(X, add=TRUE, pch=16) } sigX <- sig(X$x, X$y) AA <- densityHeat(X, sigmaX=sigX) if(online) { plot(AA, main="adaptive (II)") plot(X, add=TRUE, pch=16) } if(!online) spatstat.options(op) } \keyword{spatial} \keyword{smooth} spatstat.explore/man/rho2hat.Rd0000644000176200001440000000735314611073324016214 0ustar liggesusers\name{rho2hat} \alias{rho2hat} \title{ Smoothed Relative Density of Pairs of Covariate Values } \description{ Given a point pattern and two spatial covariates \eqn{Z_1}{Z1} and \eqn{Z_2}{Z2}, construct a smooth estimate of the relative risk of the pair \eqn{(Z_1,Z_2)}{(Z1, Z2)}. } \usage{ rho2hat(object, cov1, cov2, ..., method=c("ratio", "reweight")) } \arguments{ \item{object}{ A point pattern (object of class \code{"ppp"}), a quadrature scheme (object of class \code{"quad"}) or a fitted point process model (object of class \code{"ppm"}). } \item{cov1,cov2}{ The two covariates. Each argument is either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) providing the values of the covariate at any location, or one of the strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. } \item{\dots}{ Additional arguments passed to \code{\link{density.ppp}} to smooth the scatterplots. } \item{method}{ Character string determining the smoothing method. See Details. } } \details{ This is a bivariate version of \code{\link{rhohat}}. 
If \code{object} is a point pattern, this command produces a smoothed version of the scatterplot of the values of the covariates \code{cov1} and \code{cov2} observed at the points of the point pattern. The covariates \code{cov1,cov2} must have continuous values. If \code{object} is a fitted point process model, suppose \code{X} is the original data point pattern to which the model was fitted. Then this command assumes \code{X} is a realisation of a Poisson point process with intensity function of the form \deqn{ \lambda(u) = \rho(Z_1(u), Z_2(u)) \kappa(u) }{ lambda(u) = rho(Z1(u), Z2(u)) * kappa(u) } where \eqn{\kappa(u)}{kappa(u)} is the intensity of the fitted model \code{object}, and \eqn{\rho(z_1,z_2)}{rho(z1, z2)} is a function to be estimated. The algorithm computes a smooth estimate of the function \eqn{\rho}{rho}. The \code{method} determines how the density estimates will be combined to obtain an estimate of \eqn{\rho(z_1, z_2)}{rho(z1, z2)}: \itemize{ \item If \code{method="ratio"}, then \eqn{\rho(z_1, z_2)}{rho(z1,z2)} is estimated by the ratio of two density estimates. The numerator is a (rescaled) density estimate obtained by smoothing the points \eqn{(Z_1(y_i), Z_2(y_i))}{(Z1(y[i]), Z2(y[i]))} obtained by evaluating the two covariate \eqn{Z_1, Z_2}{Z1, Z2} at the data points \eqn{y_i}{y[i]}. The denominator is a density estimate of the reference distribution of \eqn{(Z_1,Z_2)}{(Z1, Z2)}. \item If \code{method="reweight"}, then \eqn{\rho(z_1, z_2)}{rho(z1,z2)} is estimated by applying density estimation to the points \eqn{(Z_1(y_i), Z_2(y_i))}{(Z1(y[i]), Z2(y[i]))} obtained by evaluating the two covariate \eqn{Z_1, Z_2}{Z1, Z2} at the data points \eqn{y_i}{y[i]}, with weights inversely proportional to the reference density of \eqn{(Z_1,Z_2)}{(Z1, Z2)}. } } \value{ A pixel image (object of class \code{"im"}). Also belongs to the special class \code{"rho2hat"} which has a plot method. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2012) Nonparametric estimation of the dependence of a point process on spatial covariates. \emph{Statistics and Its Interface} \bold{5} (2), 221--236. } \author{ \adrian } \seealso{ \code{\link{rhohat}}, \code{\link{methods.rho2hat}} } \examples{ attach(bei.extra) plot(rho2hat(bei, elev, grad)) if(require("spatstat.model")) { fit <- ppm(bei ~elev, covariates=bei.extra) \donttest{ plot(rho2hat(fit, elev, grad)) } plot(rho2hat(fit, elev, grad, method="reweight")) } } \keyword{spatial} \keyword{models} spatstat.explore/man/bw.pplHeat.Rd0000644000176200001440000000450314611073322016641 0ustar liggesusers\name{bw.pplHeat} \alias{bw.pplHeat} \title{ Bandwidth Selection for Diffusion Smoother by Likelihood Cross-Validation } \description{ Selects an optimal bandwidth for diffusion smoothing by point process likelihood cross-validation. } \usage{ bw.pplHeat(X, \dots, srange=NULL, ns=16, sigma=NULL, leaveoneout=TRUE, verbose = TRUE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link{densityHeat.ppp}}. } \item{srange}{ Numeric vector of length 2 specifying a range of bandwidths to be considered. } \item{ns}{ Integer. Number of candidate bandwidths to be considered. } \item{sigma}{ Maximum smoothing bandwidth. A numeric value, or a pixel image, or a \code{function(x,y)}. Alternatively a numeric vector containing a sequence of candidate bandwidths. 
} \item{leaveoneout}{ Logical value specifying whether intensity values at data points should be estimated using the leave-one-out rule. } \item{verbose}{ Logical value specifying whether to print progress reports. } } \details{ This algorithm selects the optimal global bandwidth for kernel estimation of intensity for the dataset \code{X} using diffusion smoothing \code{\link{densityHeat.ppp}}. If \code{sigma} is a numeric value, the algorithm finds the optimal bandwidth \code{tau <= sigma}. If \code{sigma} is a pixel image or function, the algorithm finds the optimal fraction \code{0 < f <= 1} such that smoothing with \code{f * sigma} would be optimal. } \value{ A numerical value giving the selected bandwidth (if \code{sigma} was a numeric value) or the selected fraction of the maximum bandwidth (if \code{sigma} was a pixel image or function). The result also belongs to the class \code{"bw.optim"} which can be plotted. } \author{ Adrian Baddeley and Tilman Davies. } \seealso{ \code{\link{bw.CvLHeat}} for an alternative method. \code{\link{densityHeat.ppp}} } \examples{ online <- interactive() if(!online) op <- spatstat.options(npixel=32) f <- function(x,y) { dnorm(x, 2.3, 0.1) * dnorm(y, 2.0, 0.2) } X <- rpoint(15, f, win=letterR) plot(X) b <- bw.pplHeat(X, sigma=0.25) b plot(b) if(!online) spatstat.options(op) } \keyword{spatial} \keyword{smooth} \concept{Bandwidth selection} spatstat.explore/man/dclf.progress.Rd0000644000176200001440000001340614611073323017413 0ustar liggesusers\name{dclf.progress} \alias{dclf.progress} \alias{mad.progress} \alias{mctest.progress} \title{ Progress Plot of Test of Spatial Pattern } \description{ Generates a progress plot (envelope representation) of the Diggle-Cressie-Loosmore-Ford test or the Maximum Absolute Deviation test for a spatial point pattern. } \usage{ dclf.progress(X, \dots) mad.progress(X, \dots) mctest.progress(X, fun = Lest, \dots, exponent = 1, nrank = 1, interpolate = FALSE, alpha, rmin=0) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{\dots}{ Arguments passed to \code{mctest.progress} or to \code{\link{envelope}}. Useful arguments include \code{fun} to determine the summary function, \code{nsim} to specify the number of Monte Carlo simulations, \code{alternative} to specify one-sided or two-sided envelopes, and \code{verbose=FALSE} to turn off the messages. } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{exponent}{ Positive number. The exponent of the \eqn{L^p} distance. See Details. } \item{nrank}{ Integer. The rank of the critical value of the Monte Carlo test, amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will become the critical values for the test. } \item{interpolate}{ Logical value indicating how to compute the critical value. If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, and the critical value is the largest simulated value of the test statistic (if \code{nrank=1}) or the \code{nrank}-th largest (if \code{nrank} is another number). If \code{interpolate=TRUE}, kernel density estimation is applied to the simulated values, and the critical value is the upper \code{alpha} quantile of this estimated distribution. } \item{alpha}{ Optional. The significance level of the test. 
Equivalent to \code{nrank/(nsim+1)} where \code{nsim} is the number of simulations. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. } } \details{ The Diggle-Cressie-Loosmore-Ford test and the Maximum Absolute Deviation test for a spatial point pattern are described in \code{\link{dclf.test}}. These tests depend on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{progress plot} or \emph{envelope representation} of the test (Baddeley et al, 2014) is a plot of the test statistic (and the corresponding critical value) against the length of the interval \code{rinterval}. The command \code{dclf.progress} performs \code{\link{dclf.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting values of the test statistic, and the corresponding critical values of the test, as a function of \eqn{R}. Similarly \code{mad.progress} performs \code{\link{mad.test}} using all possible intervals and returns the test statistic and critical value. More generally, \code{mctest.progress} performs a test based on the \eqn{L^p} discrepancy between the curves. The deviation between two curves is measured by the \eqn{p}th root of the integral of the \eqn{p}th power of the absolute value of the difference between the two curves. The exponent \eqn{p} is given by the argument \code{exponent}. The case \code{exponent=2} is the Cressie-Loosmore-Ford test, while \code{exponent=Inf} is the MAD test. If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}. The result of each command is an object of class \code{"fv"} that can be plotted to obtain the progress plot. The display shows the test statistic (solid black line) and the Monte Carlo acceptance region (grey shading). The significance level for the Monte Carlo test is \code{nrank/(nsim+1)}. Note that \code{nsim} defaults to 99, so if the values of \code{nrank} and \code{nsim} are not given, the default is a test with significance level 0.01. If \code{X} is an envelope object, then some of the data stored in \code{X} may be re-used: \itemize{ \item If \code{X} is an envelope object containing simulated functions, and \code{fun=NULL}, then the code will re-use the simulated functions stored in \code{X}. \item If \code{X} is an envelope object containing simulated point patterns, then \code{fun} will be applied to the stored point patterns to obtain the simulated functions. If \code{fun} is not specified, it defaults to \code{\link{Lest}}. \item Otherwise, new simulations will be performed, and \code{fun} defaults to \code{\link{Lest}}. } } \value{ An object of class \code{"fv"} that can be plotted to obtain the progress plot. } \references{ Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. } \author{ \adrian , Andrew Hardegen, Tom Lawrence, Gopal Nair and Robin Milne. } \seealso{ \code{\link{dclf.test}} and \code{\link{mad.test}} for the tests. See \code{\link{plot.fv}} for information on plotting objects of class \code{"fv"}. 
} \examples{ plot(dclf.progress(cells, nsim=19)) } \keyword{spatial} \keyword{htest} spatstat.explore/man/eval.fasp.Rd0000644000176200001440000000571214611073324016521 0ustar liggesusers\name{eval.fasp} \alias{eval.fasp} \title{Evaluate Expression Involving Function Arrays} \description{ Evaluates any expression involving one or more function arrays (\code{fasp} objects) and returns another function array. } \usage{ eval.fasp(expr, envir, dotonly=TRUE) } \arguments{ \item{expr}{ An expression involving the names of objects of class \code{"fasp"}. } \item{envir}{ Optional. The environment in which to evaluate the expression, or a named list containing \code{"fasp"} objects to be used in the expression. } \item{dotonly}{Logical. Passed to \code{\link{eval.fv}}.} } \details{ This is a wrapper to make it easier to perform pointwise calculations with the arrays of summary functions used in spatial statistics. A function array (object of class \code{"fasp"}) can be regarded as a matrix whose entries are functions. Objects of this kind are returned by the command \code{\link[spatstat.explore]{alltypes}}. Suppose \code{X} is an object of class \code{"fasp"}. Then \code{eval.fasp(X+3)} effectively adds 3 to the value of every function in the array \code{X}, and returns the resulting object. Suppose \code{X} and \code{Y} are two objects of class \code{"fasp"} which are compatible (for example the arrays must have the same dimensions). Then \code{eval.fasp(X + Y)} will add the corresponding functions in each cell of the arrays \code{X} and \code{Y}, and return the resulting array of functions. Suppose \code{X} is an object of class \code{"fasp"} and \code{f} is an object of class \code{"fv"}. Then \code{eval.fasp(X + f)} will add the function \code{f} to the functions in each cell of the array \code{X}, and return the resulting array of functions. In general, \code{expr} can be any expression involving (a) the \emph{names} of objects of class \code{"fasp"} or \code{"fv"}, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.fasp} determines which of the \emph{variable names} in the expression \code{expr} refer to objects of class \code{"fasp"}. The expression is then evaluated for each cell of the array using \code{\link{eval.fv}}. The expression \code{expr} must be vectorised. There must be at least one object of class \code{"fasp"} in the expression. All such objects must be compatible. } \value{ Another object of class \code{"fasp"}. } \seealso{ \code{\link{fasp.object}}, \code{\link[spatstat.explore]{Kest}} } \examples{ K <- alltypes(amacrine, "K") # expressions involving a fasp object eval.fasp(K + 3) L <- eval.fasp(sqrt(K/pi)) # expression involving two fasp objects D <- eval.fasp(K - L) # subtracting the unmarked K function from the cross-type K functions K0 <- Kest(unmark(amacrine)) DK <- eval.fasp(K - K0) ## Use of 'envir' S <- eval.fasp(1-G, list(G=alltypes(amacrine, 'G'))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat.explore/man/Kmeasure.Rd0000644000176200001440000001605214643125461016422 0ustar liggesusers\name{Kmeasure} \alias{Kmeasure} \title{Reduced Second Moment Measure} \description{ Estimates the reduced second moment measure \eqn{\kappa}{Kappa} from a point pattern in a window of arbitrary shape. } \usage{ Kmeasure(X, sigma, edge=TRUE, \dots, varcov=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{\kappa}{Kappa} will be computed. 
An object of class \code{"ppp"}, or data in any format acceptable to \code{\link[spatstat.geom]{as.ppp}()}. } \item{sigma}{ Standard deviation \eqn{\sigma}{sigma} of the Gaussian smoothing kernel. Incompatible with \code{varcov}. } \item{edge}{ Logical value indicating whether an edge correction should be applied. } \item{\dots}{ Arguments passed to \code{\link[spatstat.geom]{as.mask}} controlling the pixel resolution. } \item{varcov}{ Variance-covariance matrix of the Gaussian smoothing kernel. Incompatible with \code{sigma}. } } \value{ A real-valued pixel image (an object of class \code{"im"}, see \code{\link[spatstat.geom]{im.object}}) whose pixel values are estimates of the density of the reduced second moment measure at each location. } \details{ Given a point pattern dataset, this command computes an estimate of the reduced second moment measure \eqn{\kappa}{Kappa} of the point process. The result is a pixel image whose pixel values are estimates of the density of the reduced second moment measure. The reduced second moment measure \eqn{\kappa}{Kappa} can be regarded as a generalisation of the more familiar \eqn{K}-function. An estimate of \eqn{\kappa}{Kappa} derived from a spatial point pattern dataset can be useful in exploratory data analysis. Its advantage over the \eqn{K}-function is that it is also sensitive to anisotropy and directional effects. In a nutshell, the command \code{Kmeasure} computes a smoothed version of the \emph{Fry plot}. As explained under \code{\link{fryplot}}, the Fry plot is a scatterplot of the vectors joining all pairs of points in the pattern. The reduced second moment measure is (essentially) defined as the average of the Fry plot over different realisations of the point process. The command \code{Kmeasure} effectively smooths the Fry plot of a dataset to obtain an estimate of the reduced second moment measure. In formal terms, the reduced second moment measure \eqn{\kappa}{Kappa} of a stationary point process \eqn{X} is a measure defined on the two-dimensional plane such that, for a `typical' point \eqn{x} of the process, the expected number of other points \eqn{y} of the process such that the vector \eqn{y - x} lies in a region \eqn{A}, equals \eqn{\lambda \kappa(A)}{lambda * Kappa(A)}. Here \eqn{\lambda}{lambda} is the intensity of the process, i.e. the expected number of points of \eqn{X} per unit area. The \eqn{K}-function is a special case. The function value \eqn{K(t)} is the value of the reduced second moment measure for the disc of radius \eqn{t} centred at the origin; that is, \eqn{K(t) = \kappa(b(0,t))}{K(t) = Kappa(b(0,t))}. The command \code{Kmeasure} computes an estimate of \eqn{\kappa}{Kappa} from a point pattern dataset \code{X}, which is assumed to be a realisation of a stationary point process, observed inside a known, bounded window. Marks are ignored. The algorithm approximates the point pattern and its window by binary pixel images, introduces a Gaussian smoothing kernel and uses the Fast Fourier Transform \code{\link{fft}} to form a density estimate of \eqn{\kappa}{Kappa}. The calculation corresponds to the edge correction known as the ``translation correction''. The Gaussian smoothing kernel may be specified by either of the arguments \code{sigma} or \code{varcov}. If \code{sigma} is a single number, this specifies an isotropic Gaussian kernel with standard deviation \code{sigma} on each coordinate axis. 
If \code{sigma} is a vector of two numbers, this specifies a Gaussian kernel with standard deviation \code{sigma[1]} on the \eqn{x} axis, standard deviation \code{sigma[2]} on the \eqn{y} axis, and zero correlation between the \eqn{x} and \eqn{y} axes. If \code{varcov} is given, this specifies the variance-covariance matrix of the Gaussian kernel. There do not seem to be any well-established rules for selecting the smoothing kernel in this context. The density estimate of \eqn{\kappa}{Kappa} is returned in the form of a real-valued pixel image. Pixel values are estimates of the normalised second moment density at the centre of the pixel. (The uniform Poisson process would have values identically equal to \eqn{1}.) The image \code{x} and \code{y} coordinates are on the same scale as vector displacements in the original point pattern window. The point \code{x=0, y=0} corresponds to the `typical point'. A peak in the image near \code{(0,0)} suggests clustering; a dip in the image near \code{(0,0)} suggests inhibition; peaks or dips at other positions suggest possible periodicity. If desired, the value of \eqn{\kappa(A)}{Kappa(A)} for a region \eqn{A} can be estimated by computing the integral of the pixel image over the domain \eqn{A}, i.e.\ summing the pixel values and multiplying by pixel area, using \code{\link[spatstat.geom]{integral.im}}. One possible application is to compute anisotropic counterparts of the \eqn{K}-function (in which the disc of radius \eqn{t} is replaced by another shape). See Examples. } \section{Warning}{ Some writers use the term \emph{reduced second moment measure} when they mean the \eqn{K}-function. This has caused confusion. As originally defined, the reduced second moment measure is a measure, obtained by modifying the second moment measure, while the \eqn{K}-function is a function obtained by evaluating this measure for discs of increasing radius. In \pkg{spatstat}, the \eqn{K}-function is computed by \code{\link{Kest}} and the reduced second moment measure is computed by \code{Kmeasure}. } \references{ Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{fryplot}}, \code{\link[spatstat.geom]{spatstat.options}}, \code{\link[spatstat.geom]{integral.im}}, \code{\link[spatstat.geom]{im.object}} } \examples{ plot(Kmeasure(cells, 0.05)) # shows pronounced dip around origin consistent with strong inhibition plot(Kmeasure(redwood, 0.03), col=grey(seq(1,0,length=32))) # shows peaks at several places, reflecting clustering and ?periodicity M <- Kmeasure(cells, 0.05) # evaluate measure on a sector W <- Window(M) ang <- as.im(atan2, W) rad <- as.im(function(x,y){sqrt(x^2+y^2)}, W) sector <- solutionset(ang > 0 & ang < 1 & rad < 0.6) integral.im(M[sector, drop=FALSE]) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/SmoothHeat.Rd0000644000176200001440000000172414700374645016725 0ustar liggesusers\name{SmoothHeat} \alias{SmoothHeat} \title{Spatial Smoothing of Data by Diffusion} \description{ Generic function to perform spatial smoothing of spatial data by diffusion. 
} \usage{ SmoothHeat(X, \dots) } \arguments{ \item{X}{Some kind of spatial data} \item{\dots}{Arguments passed to methods.} } \details{ This generic function calls an appropriate method to perform spatial smoothing on the spatial dataset \code{X} using diffusion. Methods for this function include \itemize{ \item \code{\link[spatstat.explore]{SmoothHeat.ppp}} for point patterns \item \code{\link[spatstat.explore]{SmoothHeat.im}} for pixel images. } } \seealso{ \code{\link[spatstat.explore]{SmoothHeat.ppp}}, \code{\link[spatstat.explore]{SmoothHeat.im}}. } \value{ An object containing smoothed values of the input data, in an appropriate format. See the documentation for the methods. } \author{ \adrian. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.explore/man/bw.relriskHeatppp.Rd0000644000176200001440000000561314700420234020241 0ustar liggesusers\name{bw.relriskHeatppp} \alias{bw.relriskHeatppp} \title{ Bandwidth Selection for Relative Risk using Diffusion } \description{ Performs data-based bandwidth selection for the diffusion estimate of relative risk \code{\link[spatstat.explore]{relriskHeat.ppp}} using either likelihood cross-validation or least squares } \usage{ bw.relriskHeatppp(X, \dots, method = c("likelihood", "leastsquares"), weights = NULL, srange = NULL, ns = 16, sigma = NULL, leaveoneout = TRUE, verbose = TRUE) } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link[spatstat.explore]{relriskHeat.ppp}}. } \item{method}{ Character string specifying the cross-validation method. Partially matched to \code{"likelihood"} for binary likelihood cross-validation or \code{"leastsquares"} for least squares cross-validation. } \item{weights}{ Optional numeric vector of weights associated with each point of \code{X}. } \item{srange}{ Numeric vector of length 2 specifying a range of bandwidths to be considered. } \item{ns}{ Integer. Number of candidate bandwidths to be considered. } \item{sigma}{ Maximum smoothing bandwidth. A numeric value, or a pixel image, or a \code{function(x,y)}. Alternatively a numeric vector containing a sequence of candidate bandwidths. } \item{leaveoneout}{ Logical value specifying whether intensity values at data points should be estimated using the leave-one-out rule. } \item{verbose}{ Logical value specifying whether to print progress reports. } } \details{ This algorithm selects the optimal global bandwidth for kernel estimation of relative risk for the dataset \code{X} using diffusion smoothing \code{\link[spatstat.explore]{relriskHeat}}. If \code{sigma} is a numeric value, the algorithm finds the optimal bandwidth \code{tau <= sigma}. If \code{sigma} is a pixel image or function, the algorithm finds the optimal fraction \code{0 < f <= 1} such that smoothing with \code{f * sigma} would be optimal. } \value{ A numerical value giving the selected bandwidth (if \code{sigma} was a numeric value) or the selected fraction of the maximum bandwidth (if \code{sigma} was a pixel image or function). The result also belongs to the class \code{"bw.optim"} which can be plotted. } \author{ \adrian, \tilman and Suman Rakshit. 
} \seealso{ \code{\link[spatstat.explore]{relriskHeat.ppp}} } \examples{ ## bovine tuberculosis data X <- subset(btb, select=spoligotype) if(interactive()) { smax <- 40 ns <- 16 dimyx <- NULL } else { ## reduce data and resolution to speed up X <- X[c(TRUE, rep(FALSE, 7))] smax <- 9 ns <- 8 dimyx <- 32 } b <- bw.relriskHeatppp(X, sigma=smax, ns=ns, dimyx=dimyx) b plot(b) } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/dimhat.Rd0000644000176200001440000000267514611073324016115 0ustar liggesusers\name{dimhat} \alias{dimhat} \title{ Estimate Dimension of Central Subspace } \description{ Given the kernel matrix that characterises a central subspace, this function estimates the dimension of the subspace. } \usage{ dimhat(M) } \arguments{ \item{M}{ Kernel of subspace. A symmetric, non-negative definite, numeric matrix, typically obtained from \code{\link{sdr}}. } } \details{ This function computes the maximum descent estimate of the dimension of the central subspace with a given kernel matrix \code{M}. The matrix \code{M} should be the kernel matrix of a central subspace, which can be obtained from \code{\link{sdr}}. It must be a symmetric, non-negative-definite, numeric matrix. The algorithm finds the eigenvalues \eqn{\lambda_1 \ge \ldots \ge \lambda_n}{lambda[1] \ge ...\ge lambda[n]} of \eqn{M}, and then determines the index \eqn{k} for which \eqn{\lambda_k/\lambda_{k-1}}{lambda[k]/lambda[k-1]} is greatest. } \value{ A single integer giving the estimated dimension. } \seealso{ \code{\link{sdr}}, \code{\link{subspaceDistance}} } \references{ Guan, Y. and Wang, H. (2010) Sufficient dimension reduction for spatial point processes directed by Gaussian random fields. \emph{Journal of the Royal Statistical Society, Series B}, \bold{72}, 367--387. } \author{ Matlab original by Yongtao Guan, translated to \R by Suman Rakshit. } \keyword{array} \keyword{algebra} \keyword{multivariate} spatstat.explore/man/cdf.test.Rd0000644000176200001440000002316614650323373016364 0ustar liggesusers\name{cdf.test} \alias{cdf.test} \alias{cdf.test.ppp} \title{Spatial Distribution Test for Point Pattern or Point Process Model} \description{ Performs a test of goodness-of-fit of a point process model. The observed and predicted distributions of the values of a spatial covariate are compared using either the Kolmogorov-Smirnov test, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or Anderson-Darling test. For non-Poisson models, a Monte Carlo test is used. } \usage{ cdf.test(...) \method{cdf.test}{ppp}(X, covariate, test=c("ks", "cvm", "ad"), \dots, interpolate=TRUE, jitter=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}). } \item{covariate}{ The spatial covariate on which the test will be based. A function, a pixel image (object of class \code{"im"}), a list of pixel images, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. } \item{test}{ Character string identifying the test to be performed: \code{"ks"} for Kolmogorov-Smirnov test, \code{"cvm"} for \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or \code{"ad"} for Anderson-Darling test. } \item{\dots}{ Arguments passed to \code{\link[stats]{ks.test}} (from the \pkg{stats} package) or \code{\link[goftest]{cvm.test}} or \code{\link[goftest]{ad.test}} (from the \pkg{goftest} package) to control the test; and arguments passed to \code{\link[spatstat.geom]{as.mask}} to control the pixel resolution. 
} \item{interpolate}{ Logical flag indicating whether to interpolate pixel images. If \code{interpolate=TRUE}, the value of the covariate at each point of \code{X} will be approximated by interpolating the nearby pixel values. If \code{interpolate=FALSE}, the nearest pixel value will be used. } \item{jitter}{ Logical flag. If \code{jitter=TRUE}, values of the covariate will be slightly perturbed at random, to avoid tied values in the test. } } \details{ These functions perform a goodness-of-fit test of a Poisson or Gibbs point process model fitted to point pattern data. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using the Kolmogorov-Smirnov test, the \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or the Anderson-Darling test. For Gibbs models, a Monte Carlo test is performed using these test statistics. The function \code{cdf.test} is generic, with methods for point patterns (\code{"ppp"} or \code{"lpp"}), point process models (\code{"ppm"} or \code{"lppm"}) and spatial logistic regression models (\code{"slrm"}). \itemize{ \item If \code{X} is a point pattern dataset (object of class \code{"ppp"}), then \code{cdf.test(X, \dots)} performs a goodness-of-fit test of the uniform Poisson point process (Complete Spatial Randomness, CSR) for this dataset. For a multitype point pattern, the uniform intensity is assumed to depend on the type of point (sometimes called Complete Spatial Randomness and Independence, CSRI). \item If \code{model} is a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) then \code{cdf.test(model, \dots)} performs a test of goodness-of-fit for this fitted model. \item If \code{model} is a fitted spatial logistic regression (object of class \code{"slrm"}) then \code{cdf.test(model, \dots)} performs a test of goodness-of-fit for this fitted model. } The test is performed by comparing the observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same covariate under the model, using a classical goodness-of-fit test. Thus, you must nominate a spatial covariate for this test. If \code{X} is a point pattern that does not have marks, the argument \code{covariate} should be either a \code{function(x,y)} or a pixel image (object of class \code{"im"} containing the values of a spatial function, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. If \code{X} is a multitype point pattern, the argument \code{covariate} can be either a \code{function(x,y,marks)}, or a pixel image, or a list of pixel images corresponding to each possible mark value, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. The predicted distribution of the values of the \code{covariate} under the fitted \code{model} is computed as follows. 
The values of the \code{covariate} at all locations in the observation window
are evaluated, weighted according to the point process intensity of the fitted
model, and compiled into a cumulative distribution function \eqn{F} using
\code{\link[spatstat.univar]{ewcdf}}. The probability integral transformation
is then applied: the values of the \code{covariate} at the original data points
are transformed by the predicted cumulative distribution function \eqn{F} into
numbers between 0 and 1. If the model is correct, these numbers are i.i.d.
uniform random numbers. A goodness-of-fit test of the uniform distribution is
applied to these numbers using \code{stats::\link[stats]{ks.test}},
\code{goftest::\link[goftest]{cvm.test}} or
\code{goftest::\link[goftest]{ad.test}}. This test was apparently first
described (in the context of spatial data, and using Kolmogorov-Smirnov) by
Berman (1986). See also Baddeley et al (2005). If \code{model} is not a Poisson
process, then a Monte Carlo test is performed, by generating \code{nsim} point
patterns which are simulated realisations of the \code{model}, re-fitting the
model to each simulated point pattern, and calculating the test statistic for
each fitted model. The Monte Carlo \eqn{p} value is determined by comparing the
simulated values of the test statistic with the value for the original data.
The return value is an object of class \code{"htest"} containing the results of
the hypothesis test. The print method for this class gives an informative
summary of the test outcome. The return value also belongs to the class
\code{"cdftest"} for which there is a plot method
\code{\link[spatstat.explore]{plot.cdftest}}. The plot method displays the
empirical cumulative distribution function of the covariate at the data points,
and the predicted cumulative distribution function of the covariate under the
model, plotted against the value of the covariate. The argument \code{jitter}
controls whether covariate values are randomly perturbed, in order to avoid
ties. If the original data contains any ties in the covariate (i.e. points with
equal values of the covariate), and if \code{jitter=FALSE}, then the
Kolmogorov-Smirnov test implemented in \code{\link[stats]{ks.test}} will issue
a warning that it cannot calculate the exact \eqn{p}-value. To avoid this, if
\code{jitter=TRUE} each value of the covariate will be perturbed by adding a
small random value. The perturbations are normally distributed with standard
deviation equal to one hundredth of the range of values of the covariate. This
prevents ties, and the \eqn{p}-value is still correct. There is a very slight
loss of power.
}
\value{
An object of class \code{"htest"} containing the results of the test. See
\code{\link[stats]{ks.test}} for details. The return value can be printed to
give an informative summary of the test. The value also belongs to the class
\code{"cdftest"} for which there is a plot method.
}
\section{Warning}{
The outcome of the test involves a small amount of random variability, because
(by default) the coordinates are randomly perturbed to avoid tied values.
Hence, if \code{cdf.test} is executed twice, the \eqn{p}-values will not be
exactly the same. To avoid this behaviour, set \code{jitter=FALSE}.
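As a minimal sketch (using the \code{nztrees} example data and an arbitrary
seed), the outcome can be made reproducible either by fixing the random number
seed or by switching off the perturbation.
\preformatted{
  set.seed(42)                          # fix the random perturbation
  cdf.test(nztrees, "x")

  cdf.test(nztrees, "x", jitter=FALSE)  # no perturbation at all
}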
} \author{\adrian and \rolf } \seealso{ \code{\link[spatstat.explore]{plot.cdftest}}, \code{\link[spatstat.explore]{quadrat.test}}, \code{\link[spatstat.explore]{berman.test}}, \code{\link[stats]{ks.test}}, \code{\link[goftest]{cvm.test}}, \code{\link[goftest]{ad.test}}, \code{\link[spatstat.model]{ppm}} } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. } \examples{ op <- options(useFancyQuotes=FALSE) # test of CSR using x coordinate cdf.test(nztrees, "x") cdf.test(nztrees, "x", "cvm") cdf.test(nztrees, "x", "ad") # test of CSR using a function of x and y fun <- function(x,y){2* x + y} cdf.test(nztrees, fun) # test of CSR using an image covariate funimage <- as.im(fun, W=Window(nztrees)) cdf.test(nztrees, funimage) # multitype point pattern cdf.test(amacrine, "x") options(op) } \keyword{htest} \keyword{spatial} \concept{Goodness-of-fit} spatstat.explore/man/Gmulti.Rd0000644000176200001440000001724614643125461016115 0ustar liggesusers\name{Gmulti} \alias{Gmulti} \title{ Marked Nearest Neighbour Distance Function } \description{ For a marked point pattern, estimate the distribution of the distance from a typical point in subset \code{I} to the nearest point of subset \eqn{J}. } \usage{ Gmulti(X, I, J, r=NULL, breaks=NULL, \dots, disjoint=NULL, correction=c("rs", "km", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype distance distribution function \eqn{G_{IJ}(r)}{GIJ(r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset of points of \code{X} from which distances are measured. } \item{J}{Subset of points in \code{X} to which distances are measured. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{G_{IJ}(r)}{GIJ(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{disjoint}{Optional flag indicating whether the subsets \code{I} and \code{J} are disjoint. If missing, this value will be computed by inspecting the vectors \code{I} and \code{J}. } \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{G_{IJ}(r)}{GIJ(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G_{IJ}(r)}{GIJ(r)} } \item{han}{the Hanisch-style estimator of \eqn{G_{IJ}(r)}{GIJ(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{IJ}(r)}{GIJ(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G_{IJ}(r)}{GIJ(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G_{IJ}(r)}{GIJ(r)}, i.e. 
the empirical distribution of the distances from each point of type \eqn{i} to the nearest point of type \eqn{j} } \item{theo}{the theoretical value of \eqn{G_{IJ}(r)}{GIJ(r)} for a marked Poisson process with the same estimated intensity } } \details{ The function \code{Gmulti} generalises \code{\link{Gest}} (for unmarked point patterns) and \code{\link{Gdot}} and \code{\link{Gcross}} (for multitype point patterns) to arbitrary marked point patterns. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly overlapping, of a marked point process. This function computes an estimate of the cumulative distribution function \eqn{G_{IJ}(r)}{GIJ(r)} of the distance from a typical point of \eqn{X_I}{X[I]} to the nearest distinct point of \eqn{X_J}{X[J]}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. This algorithm estimates the distribution function \eqn{G_{IJ}(r)}{GIJ(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Gest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{IJ}(r)}{GIJ(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{IJ}(r)}{GIJ(r)}. This estimate should be used with caution as \eqn{G_{IJ}(r)}{GIJ(r)} is not necessarily differentiable. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G_{IJ}}{GIJ}. However this is also returned by the algorithm, as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{G_{IJ}}{GIJ} as if it were an unbiased estimator of \eqn{G_{IJ}}{GIJ}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. 
Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The function \eqn{G_{IJ}}{GIJ} does not necessarily have a density. The reduced sample estimator of \eqn{G_{IJ}}{GIJ} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G_{IJ}}{GIJ} is always nondecreasing but its maximum value may be less than \eqn{1}. } \seealso{ \code{\link{Gcross}}, \code{\link{Gdot}}, \code{\link{Gest}} } \examples{ trees <- longleaf # Longleaf Pine data: marks represent diameter \testonly{ trees <- trees[seq(1, npoints(trees), by=50), ] } Gm <- Gmulti(trees, marks(trees) <= 15, marks(trees) >= 25) plot(Gm) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/subspaceDistance.Rd0000644000176200001440000000263214611073325020121 0ustar liggesusers\name{subspaceDistance} \alias{subspaceDistance} \title{ Distance Between Linear Spaces } \description{ Evaluate the distance between two linear subspaces using the measure proposed by Li, Zha and Chiaromonte (2005). } \usage{ subspaceDistance(B0, B1) } \arguments{ \item{B0}{ Matrix whose columns are a basis for the first subspace. } \item{B1}{ Matrix whose columns are a basis for the second subspace. } } \details{ This algorithm calculates the maximum absolute value of the eigenvalues of \eqn{P1-P0} where \eqn{P0,P1} are the projection matrices onto the subspaces generated by \code{B0,B1}. This measure of distance was proposed by Li, Zha and Chiaromonte (2005). See also Xia (2007). } \value{ A single numeric value. } \references{ Guan, Y. and Wang, H. (2010) Sufficient dimension reduction for spatial point processes directed by Gaussian random fields. \emph{Journal of the Royal Statistical Society, Series B}, \bold{72}, 367--387. Li, B., Zha, H. and Chiaromonte, F. (2005) Contour regression: a general approach to dimension reduction. \emph{Annals of Statistics} \bold{33}, 1580--1616. Xia, Y. (2007) A constructive approach to the estimation of dimension reduction directions. \emph{Annals of Statistics} \bold{35}, 2654--2690. } \author{ Matlab original by Yongtao Guan, translated to \R by Suman Rakshit. } \keyword{multivariate} \keyword{algebra} spatstat.explore/man/markcrosscorr.Rd0000644000176200001440000000731214643125461017537 0ustar liggesusers\name{markcrosscorr} \alias{markcrosscorr} \title{ Mark Cross-Correlation Function } \description{ Given a spatial point pattern with several columns of marks, this function computes the mark correlation function between each pair of columns of marks. 
} \usage{ markcrosscorr(X, r = NULL, correction = c("isotropic", "Ripley", "translate"), method = "density", \dots, normalise = TRUE, Xname = NULL) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link[spatstat.geom]{as.ppp}}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{normalise}{ If \code{normalise=FALSE}, compute only the numerator of the expression for the mark correlation. } \item{Xname}{ Optional character string name for the dataset \code{X}. } } \details{ First, all columns of marks are converted to numerical values. A factor with \eqn{m} possible levels is converted to \eqn{m} columns of dummy (indicator) values. Next, each pair of columns is considered, and the mark cross-correlation is defined as \deqn{ k_{mm}(r) = \frac{E_{0u}[M_i(0) M_j(u)]}{E[M_i,M_j]} }{ k[mm](r) = E[0u](M(i,0) * M(j,u))/E(Mi * Mj) } where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation given that there are points of the process at the locations \eqn{0} and \eqn{u} separated by a distance \eqn{r}. On the numerator, \eqn{M_i(0)}{M(i,0)} and \eqn{M_j(u)}{M(j,u)} are the marks attached to locations \eqn{0} and \eqn{u} respectively in the \eqn{i}th and \eqn{j}th columns of marks respectively. On the denominator, \eqn{M_i}{Mi} and \eqn{M_j}{Mj} are independent random values drawn from the \eqn{i}th and \eqn{j}th columns of marks, respectively, and \eqn{E} is the usual expectation. Note that \eqn{k_{mm}(r)}{k[mm](r)} is not a ``correlation'' in the usual statistical sense. It can take any nonnegative real value. The value 1 suggests ``lack of correlation'': if the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{k_{mm}(r) \equiv 1}{k[mm](r) = 1}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. It must be a marked point pattern. The cross-correlations are estimated in the same manner as for \code{\link{markcorr}}. } \value{ A function array (object of class \code{"fasp"}) containing the mark cross-correlation functions for each possible pair of columns of marks. } \author{ \spatstatAuthors. } \seealso{ \code{\link{markcorr}} } \examples{ # The dataset 'betacells' has two columns of marks: # 'type' (factor) # 'area' (numeric) if(interactive()) plot(betacells) plot(markcrosscorr(betacells)) } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/with.fv.Rd0000644000176200001440000000747614611073325016241 0ustar liggesusers\name{with.fv} \alias{with.fv} \title{Evaluate an Expression in a Function Table} \description{ Evaluate an R expression in a function value table (object of class \code{"fv"}). 
} \usage{ \method{with}{fv}(data, expr, ..., fun = NULL, enclos=NULL) } \arguments{ \item{data}{A function value table (object of class \code{"fv"}) in which the expression will be evaluated. } \item{expr}{The expression to be evaluated. An \R language expression, which may involve the names of columns in \code{data}, the special abbreviations \code{.}, \code{.x} and \code{.y}, and global constants or functions. } \item{\dots}{Ignored.} \item{fun}{Logical value, specifying whether the result should be interpreted as another function (\code{fun=TRUE}) or simply returned as a numeric vector or array (\code{fun=FALSE}). See Details. } \item{enclos}{ An environment in which to search for variables that are not found in \code{data}. Defaults to \code{\link{parent.frame}()}. } } \details{ This is a method for the generic command \code{\link{with}} for an object of class \code{"fv"} (function value table). An object of class \code{"fv"} is a convenient way of storing and plotting several different estimates of the same function. It is effectively a data frame with extra attributes. See \code{\link{fv.object}} for further explanation. This command makes it possible to perform computations that involve different estimates of the same function. For example we use it to compute the arithmetic difference between two different edge-corrected estimates of the \eqn{K} function of a point pattern. The argument \code{expr} should be an \R language expression. The expression may involve \itemize{ \item the name of any column in \code{data}, referring to one of the estimates of the function; \item the symbol \code{.} which stands for all the available estimates of the function; \item the symbol \code{.y} which stands for the recommended estimate of the function (in an \code{"fv"} object, one of the estimates is always identified as the recommended estimate); \item the symbol \code{.x} which stands for the argument of the function; \item global constants or functions. } See the Examples. The expression should be capable of handling vectors and matrices. The interpretation of the argument \code{fun} is as follows: \itemize{ \item If \code{fun=FALSE}, the result of evaluating the expression \code{expr} will be returned as a numeric vector, matrix or data frame. \item If \code{fun=TRUE}, then the result of evaluating \code{expr} will be interpreted as containing the values of a new function. The return value will be an object of class \code{"fv"}. (This can only happen if the result has the right dimensions.) \item The default is \code{fun=TRUE} if the result of evaluating \code{expr} has more than one column, and \code{fun=FALSE} otherwise. } To perform calculations involving \emph{several} objects of class \code{"fv"}, use \code{\link{eval.fv}}. } \value{ A function value table (object of class \code{"fv"}) or a numeric vector or data frame. 
} \seealso{ \code{\link{with}}, \code{\link{fv.object}}, \code{\link{eval.fv}}, \code{\link[spatstat.explore]{Kest}} } \examples{ # compute 4 estimates of the K function X <- runifrect(42) K <- Kest(X) plot(K) # derive 4 estimates of the L function L(r) = sqrt(K(r)/pi) L <- with(K, sqrt(./pi)) plot(L) # compute 4 estimates of V(r) = L(r)/r V <- with(L, ./.x) plot(V) # compute the maximum absolute difference between # the isotropic and translation correction estimates of K(r) D <- with(K, max(abs(iso - trans))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat.explore/man/Math.fv.Rd0000644000176200001440000000566114611073323016147 0ustar liggesusers\name{Math.fv} \alias{Math.fv} \alias{Ops.fv} \alias{Complex.fv} \alias{Summary.fv} \title{S3 Group Generic Methods for Function Tables} \description{ These are group generic methods for objects of class \code{"fv"}, which allows for usual mathematical functions and operators to be applied directly to function tables. See Details for a list of implemented functions. } \usage{ ## S3 methods for group generics have prototypes: \special{Math(x, \dots)} \special{Ops(e1, e2)} \special{Complex(z)} \special{Summary(\dots, na.rm=FALSE, drop=TRUE)} %NAMESPACE S3method("Math", "fv") %NAMESPACE S3method("Ops", "fv") %NAMESPACE S3method("Complex", "fv") %NAMESPACE S3method("Summary", "fv") } \arguments{ \item{x, z, e1, e2}{objects of class \code{"fv"}.} \item{\dots}{further arguments passed to methods.} \item{na.rm}{ Logical value specifying whether missing values should be removed. } } \details{ Below is a list of mathematical functions and operators which are defined for objects of class \code{"fv"}. The methods are implemented using \code{\link{eval.fv}}, which tries to harmonise the functions via \code{\link{harmonise.fv}} if they aren't compatible to begin with. \enumerate{ \item Group \code{"Math"}: \itemize{ \item \code{abs}, \code{sign}, \code{sqrt},\cr \code{floor}, \code{ceiling}, \code{trunc},\cr \code{round}, \code{signif} \item \code{exp}, \code{log}, \code{expm1}, \code{log1p},\cr \code{cos}, \code{sin}, \code{tan},\cr \code{cospi}, \code{sinpi}, \code{tanpi},\cr \code{acos}, \code{asin}, \code{atan} \code{cosh}, \code{sinh}, \code{tanh},\cr \code{acosh}, \code{asinh}, \code{atanh} \item \code{lgamma}, \code{gamma}, \code{digamma}, \code{trigamma} \item \code{cumsum}, \code{cumprod}, \code{cummax}, \code{cummin} } \item Group \code{"Ops"}: \itemize{ \item \code{"+"}, \code{"-"}, \code{"*"}, \code{"/"}, \code{"^"}, \code{"\%\%"}, \code{"\%/\%"} \item \code{"&"}, \code{"|"}, \code{"!"} \item \code{"=="}, \code{"!="}, \code{"<"}, \code{"<="}, \code{">="}, \code{">"} } \item Group \code{"Summary"}: \itemize{ \item \code{all}, \code{any} \item \code{sum}, \code{prod} \item \code{min}, \code{max} \item \code{range} } \item Group \code{"Complex"}: \itemize{ \item \code{Arg}, \code{Conj}, \code{Im}, \code{Mod}, \code{Re} } } For the \code{Ops} group, one of the arguments is permitted to be a single atomic value instead of a function table. } \seealso{ \code{\link{eval.fv}} for evaluating expressions involving function tables. } \examples{ ## Convert K function to L function K <- Kest(cells) L <- sqrt(K/pi) ## Manually calculate J function FR <- Fest(redwood) GR <- Gest(redwood) suppressWarnings(JR <- (1-GR)/(1-FR)) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{methods} spatstat.explore/man/localKcross.inhom.Rd0000644000176200001440000001402114611073324020223 0ustar liggesusers\name{localKcross.inhom} \alias{localKcross.inhom} \alias{localLcross.inhom} \title{Inhomogeneous Multitype K Function} \description{ Computes spatially-weighted versions of the the local multitype \eqn{K}-function or \eqn{L}-function. } \usage{ localKcross.inhom(X, from, to, lambdaFrom=NULL, lambdaTo=NULL, \dots, rmax = NULL, correction = "Ripley", sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) localLcross.inhom(X, from, to, lambdaFrom=NULL, lambdaTo=NULL, \dots, rmax = NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{from}{ Type of points from which distances should be measured. A single value; one of the possible levels of \code{marks(X)}, or an integer indicating which level. } \item{to}{ Type of points to which distances should be measured. A single value; one of the possible levels of \code{marks(X)}, or an integer indicating which level. } \item{lambdaFrom,lambdaTo}{ Optional. Values of the estimated intensity function for the points of type \code{from} and \code{to}, respectively. Each argument should be either a vector giving the intensity values at the required points, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Extra arguments. Ignored if \code{lambda} is present. Passed to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{correction}{ String specifying the edge correction to be applied. Options are \code{"none"}, \code{"translate"}, \code{"Ripley"}, \code{"translation"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{sigma, varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the kernel smoothing procedure for estimating \code{lambdaFrom} and \code{lambdaTo}, if they are missing. } \item{lambdaX}{ Optional. Values of the estimated intensity function for all points of \code{X}. Either a vector giving the intensity values at each point of \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a list of pixel images giving the intensity values at all locations for each type of point, or a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} or \code{function(x,y,m)} which can be evaluated to give the intensity value at any location. } \item{update}{ Logical value indicating what to do when \code{lambdaFrom}, \code{lambdaTo} or \code{lambdaX} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link[spatstat.model]{update.ppm}} or \code{\link[spatstat.model]{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link[spatstat.model]{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. 
} } \details{ The functions \code{localKcross.inhom} and \code{localLcross.inhom} are inhomogeneous or weighted versions of the local multitype \eqn{K} and \eqn{L} functions implemented in \code{\link{localKcross}} and \code{\link{localLcross}}. Given a multitype spatial point pattern \code{X}, and two designated types \code{from} and \code{to}, the local multitype \eqn{K} function is defined for each point \code{X[i]} that belongs to type \code{from}, and is computed by \deqn{ K_i(r) = \sqrt{\frac 1 \pi \sum_j \frac{e_{ij}}{\lambda_j}} }{ K[i](r) = sqrt( (1/pi) * sum[j] e[i,j]/lambda[j]) } where the sum is over all points \eqn{j \neq i}{j != i} of type \code{to} that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{\lambda_j}{\lambda[j]} is the estimated intensity of the point pattern at the point \eqn{j}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The function \eqn{K_i(r)}{K[i](r)} is computed for a range of \eqn{r} values for each point \eqn{i}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X} of type \code{from}. The corresponding \eqn{L} function \eqn{L_i(r)}{L[i](r)} is computed by applying the transformation \eqn{L(r) = \sqrt{K(r)/(2\pi)}}{L(r) = sqrt(K(r)/(2*pi))}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern of type \code{from}. The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{Kinhom}}, \code{\link{Linhom}}, \code{\link{localK}}, \code{\link{localL}}. } \examples{ X <- amacrine # compute all the local L functions L <- localLcross.inhom(X) # plot all the local L functions against r plot(L, main="local L functions for ponderosa", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/bw.relrisk.Rd0000644000176200001440000000772114611073323016725 0ustar liggesusers\name{bw.relrisk} \alias{bw.relrisk} \alias{bw.relrisk.ppp} \title{ Cross Validated Bandwidth Selection for Relative Risk Estimation } \description{ Uses cross-validation to select a smoothing bandwidth for the estimation of relative risk. } \usage{ bw.relrisk(X, ...) \method{bw.relrisk}{ppp}(X, method = "likelihood", \dots, nh = spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE) } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} which has factor valued marks). } \item{method}{ Character string determining the cross-validation method. Current options are \code{"likelihood"}, \code{"leastsquares"} or \code{"weightedleastsquares"}. } \item{nh}{ Number of trial values of smoothing bandwith \code{sigma} to consider. The default is 32. } \item{hmin, hmax}{ Optional. Numeric values. Range of trial values of smoothing bandwith \code{sigma} to consider. There is a sensible default. } \item{warn}{ Logical. 
If \code{TRUE}, issue a warning if the minimum of the cross-validation criterion occurs at one of the ends of the search interval. } \item{\dots}{Additional arguments passed to \code{\link{density.ppp}} or to other methods for \code{bw.relrisk}.} } \details{ This function selects an appropriate bandwidth for the nonparametric estimation of relative risk using \code{\link{relrisk}}. Consider the indicators \eqn{y_{ij}}{y[i,j]} which equal \eqn{1} when data point \eqn{x_i}{x[i]} belongs to type \eqn{j}, and equal \eqn{0} otherwise. For a particular value of smoothing bandwidth, let \eqn{\hat p_j(u)}{p*[j](u)} be the estimated probabilities that a point at location \eqn{u} will belong to type \eqn{j}. Then the bandwidth is chosen to minimise either the negative likelihood, the squared error, or the approximately standardised squared error, of the indicators \eqn{y_{ij}}{y[i,j]} relative to the fitted values \eqn{\hat p_j(x_i)}{p*[j](x[i])}. See Diggle (2003) or Baddeley et al (2015). The result is a numerical value giving the selected bandwidth \code{sigma}. The result also belongs to the class \code{"bw.optim"} allowing it to be printed and plotted. The plot shows the cross-validation criterion as a function of bandwidth. The range of values for the smoothing bandwidth \code{sigma} is set by the arguments \code{hmin, hmax}. There is a sensible default, based on multiples of Stoyan's rule of thumb \code{\link{bw.stoyan}}. If the optimal bandwidth is achieved at an endpoint of the interval \code{[hmin, hmax]}, the algorithm will issue a warning (unless \code{warn=FALSE}). If this occurs, then it is probably advisable to expand the interval by changing the arguments \code{hmin, hmax}. Computation time depends on the number \code{nh} of trial values considered, and also on the range \code{[hmin, hmax]} of values considered, because larger values of \code{sigma} require calculations involving more pairs of data points. } \value{ A single numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} (see \code{\link[spatstat.explore]{bw.optim.object}}) which can be plotted to show the bandwidth selection criterion as a function of \code{sigma}. } \seealso{ \code{\link{relrisk}}, \code{\link{bw.stoyan}}. \code{\link[spatstat.explore]{bw.optim.object}}. } \examples{ \testonly{op <- spatstat.options(n.bandwidth=8)} b <- bw.relrisk(urkiola) b plot(b) b <- bw.relrisk(urkiola, hmax=20) plot(b) \testonly{spatstat.options(op)} } \references{ \baddrubaturnbook Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Kelsall, J.E. and Diggle, P.J. (1995) Kernel estimation of relative risk. \emph{Bernoulli} \bold{1}, 3--16. } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{methods} \keyword{smooth} \concept{Bandwidth selection} spatstat.explore/man/plot.scan.test.Rd0000644000176200001440000000466414643125462017534 0ustar liggesusers\name{plot.scan.test} \alias{plot.scan.test} \alias{as.im.scan.test} \title{ Plot Result of Scan Test } \description{ Computes or plots an image showing the likelihood ratio test statistic for the scan test, or the optimal circle radius. } \usage{ \method{plot}{scan.test}(x, \dots, what=c("statistic", "radius"), do.window = TRUE) \method{as.im}{scan.test}(X, \dots, what=c("statistic", "radius")) } \arguments{ \item{x,X}{ Result of a scan test. An object of class \code{"scan.test"} produced by \code{\link{scan.test}}. 
} \item{\dots}{ Arguments passed to \code{\link[spatstat.geom]{plot.im}} to control the appearance of the plot. } \item{what}{ Character string indicating whether to produce an image of the (profile) likelihood ratio test statistic (\code{what="statistic"}, the default) or an image of the optimal value of circle radius (\code{what="radius"}). } \item{do.window}{ Logical value indicating whether to plot the original window of the data as well. } } \details{ These functions extract, and plot, the spatially-varying value of the likelihood ratio test statistic which forms the basis of the scan test. If the test result \code{X} was based on circles of the same radius \code{r}, then \code{as.im(X)} is a pixel image of the likelihood ratio test statistic as a function of the position of the centre of the circle. If the test result \code{X} was based on circles of several different radii \code{r}, then \code{as.im(X)} is a pixel image of the profile (maximum value over all radii \code{r}) likelihood ratio test statistic as a function of the position of the centre of the circle, and \code{as.im(X, what="radius")} is a pixel image giving for each location \eqn{u} the value of \code{r} which maximised the likelihood ratio test statistic at that location. The \code{plot} method plots the corresponding image. } \value{ The value of \code{as.im.scan.test} is a pixel image (object of class \code{"im"}). The value of \code{plot.scan.test} is \code{NULL}. } \author{\adrian and \rolf } \seealso{ \code{\link{scan.test}}, \code{\link{scanLRTS}} } \examples{ online <- interactive() Nsim <- if(online) 19 else 2 r <- if(online) seq(0.04, 0.1, by=0.01) else c(0.05, 0.1) a <- scan.test(redwood, r=r, method="poisson", nsim=Nsim) plot(a) as.im(a) plot(a, what="radius") } \keyword{htest} \keyword{spatial} spatstat.explore/man/Jmulti.Rd0000644000176200001440000001366214643125461016116 0ustar liggesusers\name{Jmulti} \alias{Jmulti} \title{ Marked J Function } \description{ For a marked point pattern, estimate the multitype \eqn{J} function summarising dependence between the points in subset \eqn{I} and those in subset \eqn{J}. } \usage{ Jmulti(X, I, J, eps=NULL, r=NULL, breaks=NULL, \dots, disjoint=NULL, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype distance distribution function \eqn{J_{IJ}(r)}{J[IJ](r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset of points of \code{X} from which distances are measured. See Details. } \item{J}{Subset of points in \code{X} to which distances are measured. See Details. } \item{eps}{A positive number. The pixel resolution of the discrete approximation to Euclidean distance (see \code{\link{Jest}}). There is a sensible default. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{J_{IJ}(r)}{J[IJ](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{disjoint}{Optional flag indicating whether the subsets \code{I} and \code{J} are disjoint. If missing, this value will be computed by inspecting the vectors \code{I} and \code{J}. } \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. 
Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{J_{IJ}(r)}{J[IJ](r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J_{IJ}(r)}{J[IJ](r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{J_{IJ}(r)}{J[IJ](r)} } \item{han}{the Hanisch-style estimator of \eqn{J_{IJ}(r)}{J[IJ](r)} } \item{un}{the uncorrected estimate of \eqn{J_{IJ}(r)}{J[IJ](r)}, formed by taking the ratio of uncorrected empirical estimators of \eqn{1 - G_{IJ}(r)}{1 - G[IJ](r)} and \eqn{1 - F_{J}(r)}{1 - F[J](r)}, see \code{\link{Gdot}} and \code{\link{Fest}}. } \item{theo}{the theoretical value of \eqn{J_{IJ}(r)}{J[IJ](r)} for a marked Poisson process with the same estimated intensity, namely 1. } } \details{ The function \code{Jmulti} generalises \code{\link{Jest}} (for unmarked point patterns) and \code{\link{Jdot}} and \code{\link{Jcross}} (for multitype point patterns) to arbitrary marked point patterns. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly overlapping, of a marked point process. Define \deqn{J_{IJ}(r) = \frac{1 - G_{IJ}(r)}{1 - F_J(r)}}{ J[IJ](r) = (1 - G[IJ](r))/(1 - F[J](r))} where \eqn{F_J(r)}{F[J](r)} is the cumulative distribution function of the distance from a fixed location to the nearest point of \eqn{X_J}{X[J]}, and \eqn{G_{IJ}(r)}{GJ(r)} is the distribution function of the distance from a typical point of \eqn{X_I}{X[I]} to the nearest distinct point of \eqn{X_J}{X[J]}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. It is assumed that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Jest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{J_{IJ}(r)}{J[IJ](r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. 
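For example, \code{I} and \code{J} may be supplied as functions, so that
they can be re-evaluated on each simulated pattern when generating
simulation envelopes. The following is a rough sketch only (using the
\code{longleaf} data, as in the Examples below; the names \code{smallI}
and \code{largeJ} are arbitrary):
\preformatted{
  smallI <- function(X) marks(X) <= 15    # subset I: smaller trees
  largeJ <- function(X) marks(X) >= 25    # subset J: larger trees
  Jm <- Jmulti(longleaf, smallI, largeJ)
  ## the same functions could be passed on to envelope(), e.g.
  ## envelope(longleaf, Jmulti, I=smallI, J=largeJ, nsim=19)
}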
} \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \seealso{ \code{\link{Jcross}}, \code{\link{Jdot}}, \code{\link{Jest}} } \examples{ trees <- longleaf # Longleaf Pine data: marks represent diameter \testonly{ trees <- trees[seq(1,npoints(trees), by=50)] } Jm <- Jmulti(trees, marks(trees) <= 15, marks(trees) >= 25) plot(Jm) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/dg.sigtrace.Rd0000644000176200001440000001512514611073323017032 0ustar liggesusers\name{dg.sigtrace} \alias{dg.sigtrace} \title{ Significance Trace of Dao-Genton Test } \description{ Generates a Significance Trace of the Dao and Genton (2014) test for a spatial point pattern. } \usage{ dg.sigtrace(X, fun = Lest, \dots, exponent = 2, nsim = 19, nsimsub = nsim - 1, alternative = c("two.sided", "less", "greater"), rmin=0, leaveout=1, interpolate = FALSE, confint = TRUE, alpha = 0.05, savefuns=FALSE, savepatterns=FALSE, verbose=FALSE) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{\dots}{ Arguments passed to \code{\link{envelope}}. } \item{exponent}{ Positive number. Exponent used in the test statistic. Use \code{exponent=2} for the Diggle-Cressie-Loosmore-Ford test, and \code{exponent=Inf} for the Maximum Absolute Deviation test. See Details. } \item{nsim}{ Number of repetitions of the basic test. } \item{nsimsub}{ Number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations. } \item{alternative}{ Character string specifying the alternative hypothesis. The default (\code{alternative="two.sided"}) is that the true value of the summary function is not equal to the theoretical value postulated under the null hypothesis. If \code{alternative="less"} the alternative hypothesis is that the true value of the summary function is lower than the theoretical value. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{confint}{ Logical value indicating whether to compute a confidence interval for the \sQuote{true} \eqn{p}-value. } \item{alpha}{ Significance level to be plotted (this has no effect on the calculation but is simply plotted as a reference value). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical flag indicating whether to print progress reports. 
} } \details{ The Dao and Genton (2014) test for a spatial point pattern is described in \code{\link{dg.test}}. This test depends on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{significance trace} (Bowman and Azzalini, 1997; Baddeley et al, 2014, 2015; Baddeley, Rubak and Turner, 2015) of the test is a plot of the \eqn{p}-value obtained from the test against the length of the interval \code{rinterval}. The command \code{dg.sigtrace} effectively performs \code{\link{dg.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting \eqn{p}-values as a function of \eqn{R}. The result is an object of class \code{"fv"} that can be plotted to obtain the significance trace. The plot shows the Dao-Genton adjusted \eqn{p}-value (solid black line), the critical value \code{0.05} (dashed red line), and a pointwise 95\% confidence band (grey shading) for the \sQuote{true} (Neyman-Pearson) \eqn{p}-value. The confidence band is based on the Agresti-Coull (1998) confidence interval for a binomial proportion. If \code{X} is an envelope object and \code{fun=NULL} then the code will re-use the simulated functions stored in \code{X}. If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}. The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). } \value{ An object of class \code{"fv"} that can be plotted to obtain the significance trace. } \references{ Agresti, A. and Coull, B.A. (1998) Approximate is better than \dQuote{Exact} for interval estimation of binomial proportions. \emph{American Statistician} \bold{52}, 119--126. Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84}(3) 477--489. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Unpublished manuscript. \baddrubaturnbook Bowman, A.W. and Azzalini, A. (1997) \emph{Applied smoothing techniques for data analysis: the kernel approach with S-Plus illustrations}. Oxford University Press, Oxford. Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. } \author{ Adrian Baddeley, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \spatstatAuthors. } \seealso{ \code{\link{dg.test}} for the Dao-Genton test, \code{\link{dclf.sigtrace}} for significance traces of other tests. 
} \examples{ ns <- if(interactive()) 19 else 5 plot(dg.sigtrace(cells, nsim=ns)) } \keyword{spatial} \keyword{htest} spatstat.explore/man/Jest.Rd0000644000176200001440000002257214643125461015557 0ustar liggesusers\name{Jest} \alias{Jest} \title{Estimate the J-function} \description{ Estimates the summary function \eqn{J(r)} for a point pattern in a window of arbitrary shape. } \usage{ Jest(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{J(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link[spatstat.geom]{as.ppp}()}. } \item{\dots}{Ignored.} \item{eps}{ the resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{vector of values for the argument \eqn{r} at which \eqn{J(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \code{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. Character string specifying the choice of edge correction(s) in \code{\link{Fest}} and \code{\link{Gest}}. See Details. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{J} has been estimated} \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J(r)} computed from the border-corrected estimates of \eqn{F} and \eqn{G} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{J(r)} computed from the Kaplan-Meier estimates of \eqn{F} and \eqn{G} } \item{han}{the Hanisch-style estimator of \eqn{J(r)} computed from the Hanisch estimate of \eqn{G} and the Chiu-Stoyan estimate of \eqn{F} } \item{un}{the uncorrected estimate of \eqn{J(r)} computed from the uncorrected estimates of \eqn{F} and \eqn{G} } \item{theo}{the theoretical value of \eqn{J(r)} for a stationary Poisson process: identically equal to \eqn{1} } The data frame also has \bold{attributes} \item{F}{ the output of \code{\link{Fest}} for this point pattern, containing three estimates of the empty space function \eqn{F(r)} and an estimate of its hazard function } \item{G}{ the output of \code{\link{Gest}} for this point pattern, containing three estimates of the nearest neighbour distance distribution function \eqn{G(r)} and an estimate of its hazard function } } \note{ Sizeable amounts of memory may be needed during the calculation. } \details{ The \eqn{J} function (Van Lieshout and Baddeley, 1996) of a stationary point process is defined as \deqn{J(r) = \frac{1-G(r)}{1-F(r)} }{ % J(r) = (1-G(r))/(1-F(r))} where \eqn{G(r)} is the nearest neighbour distance distribution function of the point process (see \code{\link{Gest}}) and \eqn{F(r)} is its empty space function (see \code{\link{Fest}}). For a completely random (uniform Poisson) point process, the \eqn{J}-function is identically equal to \eqn{1}. Deviations \eqn{J(r) < 1} or \eqn{J(r) > 1} typically indicate spatial clustering or spatial regularity, respectively. The \eqn{J}-function is one of the few characteristics that can be computed explicitly for a wide range of point processes. See Van Lieshout and Baddeley (1996), Baddeley et al (2000), Thonnes and Van Lieshout (1999) for further information. 
An estimate of \eqn{J} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern. The estimate of \eqn{J(r)} is compared against the constant function \eqn{1}. Deviations \eqn{J(r) < 1} or \eqn{J(r) > 1} may suggest spatial clustering or spatial regularity, respectively. This algorithm estimates the \eqn{J}-function from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link[spatstat.geom]{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link[spatstat.geom]{as.ppp}()}. The functions \code{\link{Fest}} and \code{\link{Gest}} are called to compute estimates of \eqn{F(r)} and \eqn{G(r)} respectively. These estimates are then combined by simply taking the ratio \eqn{J(r) = (1-G(r))/(1-F(r))}. In fact several different estimates are computed using different edge corrections (Baddeley, 1998). The Kaplan-Meier estimate (returned as \code{km}) is the ratio \code{J = (1-G)/(1-F)} of the Kaplan-Meier estimates of \eqn{1-F} and \eqn{1-G} computed by \code{\link{Fest}} and \code{\link{Gest}} respectively. This is computed if \code{correction=NULL} or if \code{correction} includes \code{"km"}. The Hanisch-style estimate (returned as \code{han}) is the ratio \code{J = (1-G)/(1-F)} where \code{F} is the Chiu-Stoyan estimate of \eqn{F} and \code{G} is the Hanisch estimate of \eqn{G}. This is computed if \code{correction=NULL} or if \code{correction} includes \code{"cs"} or \code{"han"}. The reduced-sample or border corrected estimate (returned as \code{rs}) is the same ratio \code{J = (1-G)/(1-F)} of the border corrected estimates. This is computed if \code{correction=NULL} or if \code{correction} includes \code{"rs"} or \code{"border"}. These edge-corrected estimators are slightly biased for \eqn{J}, since they are ratios of approximately unbiased estimators. The logarithm of the Kaplan-Meier estimate is exactly unbiased for \eqn{\log J}{log J}. The uncorrected estimate (returned as \code{un} and computed only if \code{correction} includes \code{"none"}) is the ratio \code{J = (1-G)/(1-F)} of the uncorrected (``raw'') estimates of the survival functions of \eqn{F} and \eqn{G}, which are the empirical distribution functions of the empty space distances \code{Fest(X,\dots)$raw} and of the nearest neighbour distances \code{Gest(X,\dots)$raw}. The uncorrected estimates of \eqn{F} and \eqn{G} are severely biased. However the uncorrected estimate of \eqn{J} is approximately unbiased (if the process is close to Poisson); it is insensitive to edge effects, and should be used when edge effects are severe (see Baddeley et al, 2000). The algorithm for \code{\link{Fest}} uses two discrete approximations which are controlled by the parameter \code{eps} and by the spacing of values of \code{r} respectively. See \code{\link{Fest}} for details. First-time users are strongly advised not to specify these arguments. Note that the value returned by \code{Jest} includes the output of \code{\link{Fest}} and \code{\link{Gest}} as attributes (see the last example below). 
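A rough sketch of how these attributes can be extracted (the variable
names \code{Fc} and \code{Gc} are arbitrary):
\preformatted{
  J  <- Jest(cells)
  Fc <- attr(J, "F")    # the output of Fest(cells)
  Gc <- attr(J, "G")    # the output of Gest(cells)
}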
If the user is intending to compute the \code{F,G} and \code{J} functions for the point pattern, it is only necessary to call \code{Jest}. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37--78. Baddeley, A.J. and Gill, R.D. The empty space hazard of a spatial pattern. Research Report 1994/3, Department of Mathematics, University of Western Australia, May 1994. Baddeley, A.J. and Gill, R.D. Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25} (1997) 263--292. Baddeley, A., Kerscher, M., Schladitz, K. and Scott, B.T. Estimating the \emph{J} function without edge correction. \emph{Statistica Neerlandica} \bold{54} (2000) 315--328. Borgefors, G. Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34} (1986) 344--371. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Thonnes, E. and Van Lieshout, M.N.M, A comparative study on the power of Van Lieshout and Baddeley's J-function. \emph{Biometrical Journal} \bold{41} (1999) 721--734. Van Lieshout, M.N.M. and Baddeley, A.J. A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50} (1996) 344--361. } \seealso{ \code{\link{Jinhom}}, \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Kest}}, \code{\link[spatstat.univar]{km.rs}}, \code{\link[spatstat.univar]{reduced.sample}}, \code{\link[spatstat.univar]{kaplan.meier}} } \examples{ J <- Jest(cells, 0.01) plot(J, main="cells data") # values are far above J = 1, indicating regular pattern data(redwood) J <- Jest(redwood, 0.01, legendpos="center") plot(J, main="redwood data") # values are below J = 1, indicating clustered pattern } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/dg.progress.Rd0000644000176200001440000001500014611073323017065 0ustar liggesusers\name{dg.progress} \alias{dg.progress} \title{ Progress Plot of Dao-Genton Test of Spatial Pattern } \description{ Generates a progress plot (envelope representation) of the Dao-Genton test for a spatial point pattern. } \usage{ dg.progress(X, fun = Lest, \dots, exponent = 2, nsim = 19, nsimsub = nsim - 1, nrank = 1, alpha, leaveout=1, interpolate = FALSE, rmin=0, savefuns = FALSE, savepatterns = FALSE, verbose=TRUE) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{\dots}{ Arguments passed to \code{\link{envelope}}. Useful arguments include \code{alternative} to specify one-sided or two-sided envelopes. } \item{exponent}{ Positive number. The exponent of the \eqn{L^p} distance. See Details. } \item{nsim}{ Number of repetitions of the basic test. } \item{nsimsub}{ Number of simulations in each basic test. 
There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations. } \item{nrank}{ Integer. The rank of the critical value of the Monte Carlo test, amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will become the critical values for the test. } \item{alpha}{ Optional. The significance level of the test. Equivalent to \code{nrank/(nsim+1)} where \code{nsim} is the number of simulations. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating how to compute the critical value. If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, and the critical value is the largest simulated value of the test statistic (if \code{nrank=1}) or the \code{nrank}-th largest (if \code{nrank} is another number). If \code{interpolate=TRUE}, kernel density estimation is applied to the simulated values, and the critical value is the upper \code{alpha} quantile of this estimated distribution. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. } \item{savefuns}{ Logical value indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical value indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ The Dao and Genton (2014) test for a spatial point pattern is described in \code{\link{dg.test}}. This test depends on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{progress plot} or \emph{envelope representation} of the test (Baddeley et al, 2014, 2015; Baddeley, Rubak and Turner, 2015) is a plot of the test statistic (and the corresponding critical value) against the length of the interval \code{rinterval}. The command \code{dg.progress} effectively performs \code{\link{dg.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting values of the test statistic, and the corresponding critical values of the test, as a function of \eqn{R}. The result is an object of class \code{"fv"} that can be plotted to obtain the progress plot. The display shows the test statistic (solid black line) and the test acceptance region (grey shading). If \code{X} is an envelope object, then some of the data stored in \code{X} may be re-used: \itemize{ \item If \code{X} is an envelope object containing simulated functions, and \code{fun=NULL}, then the code will re-use the simulated functions stored in \code{X}. \item If \code{X} is an envelope object containing simulated point patterns, then \code{fun} will be applied to the stored point patterns to obtain the simulated functions. If \code{fun} is not specified, it defaults to \code{\link{Lest}}. \item Otherwise, new simulations will be performed, and \code{fun} defaults to \code{\link{Lest}}. } If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}. 
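For example, the following rough sketch (using a small number of
simulations purely to keep the computation brief) restricts the test
statistic to intervals of the form \eqn{[0.05, R]}:
\preformatted{
  ns <- if(interactive()) 19 else 5
  p <- dg.progress(cells, Lest, nsim=ns, rmin=0.05)
  plot(p)
}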
The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). } \value{ An object of class \code{"fv"} that can be plotted to obtain the progress plot. } \references{ Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Unpublished manuscript. \baddrubaturnbook Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. } \author{ Adrian Baddeley, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \spatstatAuthors. } \seealso{ \code{\link{dg.test}}, \code{\link{dclf.progress}} } \examples{ ns <- if(interactive()) 19 else 5 plot(dg.progress(cells, nsim=ns)) } \keyword{spatial} \keyword{htest} spatstat.explore/man/roc.Rd0000644000176200001440000000557414650323373015440 0ustar liggesusers\name{roc} \alias{roc} \alias{roc.ppp} \title{ Receiver Operating Characteristic } \description{ Computes the Receiver Operating Characteristic curve for a point pattern or a fitted point process model. } \usage{ roc(X, \dots) \method{roc}{ppp}(X, covariate, \dots, high = TRUE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"} or \code{"lpp"}) or fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"slrm"} or \code{"lppm"}). } \item{covariate}{ Spatial covariate. Either a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or one of the strings \code{"x"} or \code{"y"} indicating the Cartesian coordinates. } \item{high}{ Logical value indicating whether the threshold operation should favour high or low values of the covariate. } \item{\dots}{ Arguments passed to \code{\link[spatstat.geom]{as.mask}} controlling the pixel resolution for calculations. } } \details{ This command computes Receiver Operating Characteristic curve. The area under the ROC is computed by \code{\link[spatstat.explore]{auc}}. For a point pattern \code{X} and a covariate \code{Z}, the ROC is a plot showing the ability of the covariate to separate the spatial domain into areas of high and low density of points. For each possible threshold \eqn{z}, the algorithm calculates the fraction \eqn{a(z)} of area in the study region where the covariate takes a value greater than \eqn{z}, and the fraction \eqn{b(z)} of data points for which the covariate value is greater than \eqn{z}. The ROC is a plot of \eqn{b(z)} against \eqn{a(z)} for all thresholds \eqn{z}. For a fitted point process model, the ROC shows the ability of the fitted model intensity to separate the spatial domain into areas of high and low density of points. The ROC is \bold{not} a diagnostic for the goodness-of-fit of the model (Lobo et al, 2007). 
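As a concrete illustration of the point pattern case described above,
here is a rough sketch which reuses the \code{copper} data in the same
way as the examples for \code{\link{berman.test}}:
\preformatted{
  X <- copper$SouthPoints
  D <- distmap(copper$SouthLines)    # distance to nearest fault line
  plot(roc(X, D, high=FALSE))        # low distance values favoured
}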
(For spatial logistic regression models (class \code{"slrm"}) replace \dQuote{intensity} by \dQuote{probability of presence} in the text above.) } \value{ Function value table (object of class \code{"fv"}) which can be plotted to show the ROC curve. } \references{ Lobo, J.M., \ifelse{latex}{\out{Jim{\'e}nez}}{Jimenez}-Valverde, A. and Real, R. (2007) AUC: a misleading measure of the performance of predictive distribution models. \emph{Global Ecology and Biogeography} \bold{17}(2) 145--151. Nam, B.-H. and D'Agostino, R. (2002) Discrimination index, the area under the {ROC} curve. Pages 267--279 in Huber-Carol, C., Balakrishnan, N., Nikulin, M.S. and Mesbah, M., \emph{Goodness-of-fit tests and model validity}, \ifelse{latex}{\out{Birkh{\"a}user}}{Birkhauser}, Basel. } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.explore]{auc}} } \examples{ plot(roc(swedishpines, "x")) } \keyword{spatial} spatstat.explore/man/berman.test.Rd0000644000176200001440000001445214650323373017072 0ustar liggesusers\name{berman.test} \alias{berman.test} \alias{berman.test.ppp} \title{Berman's Tests for Point Process Model} \description{ Tests the goodness-of-fit of a Poisson point process model using methods of Berman (1986). } \usage{ berman.test(...) \method{berman.test}{ppp}(X, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}). } \item{covariate}{ The spatial covariate on which the test will be based. An image (object of class \code{"im"}) or a function. } \item{which}{ Character string specifying the choice of test. } \item{alternative}{ Character string specifying the alternative hypothesis. } \item{\dots}{ Additional arguments controlling the pixel resolution (arguments \code{dimyx}, \code{eps} and \code{rule.eps} passed to \code{\link[spatstat.geom]{as.mask}}) or other undocumented features. } } \details{ These functions perform a goodness-of-fit test of a Poisson point process model fitted to point pattern data. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using either of two test statistics \eqn{Z_1}{Z[1]} and \eqn{Z_2}{Z[2]} proposed by Berman (1986). The \eqn{Z_1}{Z[1]} test is also known as the Lawson-Waller test. The function \code{berman.test} is generic, with methods for point patterns (\code{"ppp"} or \code{"lpp"}) and point process models (\code{"ppm"} or \code{"lppm"}). \itemize{ \item If \code{X} is a point pattern dataset (object of class \code{"ppp"} or \code{"lpp"}), then \code{berman.test(X, ...)} performs a goodness-of-fit test of the uniform Poisson point process (Complete Spatial Randomness, CSR) for this dataset. \item If \code{model} is a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) then \code{berman.test(model, ...)} performs a test of goodness-of-fit for this fitted model. In this case, \code{model} should be a Poisson point process. } The test is performed by comparing the observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same covariate under the model. Thus, you must nominate a spatial covariate for this test. The argument \code{covariate} should be either a \code{function(x,y)} or a pixel image (object of class \code{"im"} containing the values of a spatial function. 
If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. Next the values of the \code{covariate} at all locations in the observation window are evaluated. The point process intensity of the fitted model is also evaluated at all locations in the window. \itemize{ \item If \code{which="Z1"}, the test statistic \eqn{Z_1}{Z[1]} is computed as follows. The sum \eqn{S} of the covariate values at all data points is evaluated. The predicted mean \eqn{\mu}{\mu} and variance \eqn{\sigma^2}{\sigma^2} of \eqn{S} are computed from the values of the covariate at all locations in the window. Then we compute \eqn{Z_1 = (S-\mu)/\sigma}{Z[1]=(S-\mu)/\sigma}. Closely-related tests were proposed independently by Waller et al (1993) and Lawson (1993) so this test is often termed the Lawson-Waller test in epidemiological literature. \item If \code{which="Z2"}, the test statistic \eqn{Z_2}{Z[2]} is computed as follows. The values of the \code{covariate} at all locations in the observation window, weighted by the point process intensity, are compiled into a cumulative distribution function \eqn{F}. The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. The standardised sample mean of these numbers is the statistic \eqn{Z_2}{Z[2]}. } In both cases the null distribution of the test statistic is the standard normal distribution, approximately. The return value is an object of class \code{"htest"} containing the results of the hypothesis test. The print method for this class gives an informative summary of the test outcome. } \value{ An object of class \code{"htest"} (hypothesis test) and also of class \code{"bermantest"}, containing the results of the test. The return value can be plotted (by \code{\link[spatstat.explore]{plot.bermantest}}) or printed to give an informative summary of the test. } \section{Warning}{ The meaning of a one-sided test must be carefully scrutinised: see the printed output. } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.explore]{cdf.test}}, \code{\link[spatstat.explore]{quadrat.test}}, \code{\link[spatstat.model]{ppm}} } \references{ Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. Lawson, A.B. (1993) On the analysis of mortality events around a prespecified fixed point. \emph{Journal of the Royal Statistical Society, Series A} \bold{156} (3) 363--377. Waller, L., Turnbull, B., Clark, L.C. and Nasca, P. (1992) Chronic Disease Surveillance and testing of clustering of disease and exposure: Application to leukaemia incidence and TCE-contaminated dumpsites in upstate New York. \emph{Environmetrics} \bold{3}, 281--300. 
} \examples{ # Berman's data X <- copper$SouthPoints L <- copper$SouthLines D <- distmap(L, eps=1) # test of CSR berman.test(X, D) berman.test(X, D, "Z2") } \keyword{htest} \keyword{spatial} \concept{Goodness-of-fit} spatstat.explore/man/rat.Rd0000644000176200001440000000345514643125462015440 0ustar liggesusers\name{rat} \alias{rat} \title{ Ratio object } \description{ Stores the numerator, denominator, and value of a ratio as a single object. } \usage{ rat(ratio, numerator, denominator, check = TRUE) } \arguments{ \item{ratio,numerator,denominator}{ Three objects belonging to the same class. } \item{check}{ Logical. Whether to check that the objects are \code{\link[spatstat.geom]{compatible}}. } } \details{ The class \code{"rat"} is a simple mechanism for keeping track of the numerator and denominator when calculating a ratio. Its main purpose is simply to signal that the object is a ratio. The function \code{rat} creates an object of class \code{"rat"} given the numerator, the denominator and the ratio. No calculation is performed; the three objects are simply stored together. The arguments \code{ratio}, \code{numerator}, \code{denominator} can be objects of any kind. They should belong to the same class. It is assumed that the relationship \deqn{ \mbox{ratio} = \frac{\mbox{numerator}}{\mbox{denominator}} }{ ratio = numerator/denominator } holds in some version of arithmetic. However, no calculation is performed. By default the algorithm checks whether the three arguments \code{ratio}, \code{numerator}, \code{denominator} are compatible objects, according to \code{\link[spatstat.geom]{compatible}}. The result is equivalent to \code{ratio} except for the addition of extra information. } \value{ An object equivalent to the object \code{ratio} except that it also belongs to the class \code{"rat"} and has additional attributes \code{numerator} and \code{denominator}. } \author{\adrian and \rolf. } \seealso{ \code{\link[spatstat.geom]{compatible}}, \code{\link[spatstat.explore]{pool}} } \keyword{spatial} \keyword{manip} spatstat.explore/man/idw.Rd0000644000176200001440000001201214643125461015421 0ustar liggesusers\name{idw} \alias{idw} \title{Inverse-distance weighted smoothing of observations at irregular points} \description{ Performs spatial smoothing of numeric values observed at a set of irregular locations using inverse-distance weighting. } \usage{ idw(X, power=2, at=c("pixels", "points"), ..., se=FALSE) } \arguments{ \item{X}{A marked point pattern (object of class \code{"ppp"}).} \item{power}{Numeric. Power of distance used in the weighting.} \item{at}{ Character string specifying whether to compute the intensity values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). String is partially matched. } \item{\dots}{Arguments passed to \code{\link[spatstat.geom]{as.mask}} to control the pixel resolution of the result.} \item{se}{ Logical value specifying whether to calculate a standard error. } } \details{ This function performs spatial smoothing of numeric values observed at a set of irregular locations. Smoothing is performed by inverse distance weighting. 
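Concretely, the weighted average defined by the formula given below can be reproduced by hand at a single query location (a sketch only, not the package implementation; it uses the finpines data that also appear in the examples, with an arbitrary query point u):

  X <- finpines
  v <- marks(X)$height                        # observed values v[i]
  u <- c(0, 0)                                # an arbitrary query location
  d <- sqrt((X$x - u[1])^2 + (X$y - u[2])^2)  # distances d(u, x[i])
  w <- 1/d^2                                  # inverse-distance weights (power = 2)
  sum(w * v)/sum(w)                           # the interpolated value g(u)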
If the observed values are \eqn{v_1,\ldots,v_n}{v[1],...,v[n]} at locations \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} respectively, then the smoothed value at a location \eqn{u} is \deqn{ g(u) = \frac{\sum_i w_i v_i}{\sum_i w_i} }{ g(u) = (sum of w[i] * v[i])/(sum of w[i]) } where the weights are the inverse \eqn{p}-th powers of distance, \deqn{ w_i = \frac 1 {d(u,x_i)^p} }{ w[i] = 1/d(u,x[i])^p } where \eqn{d(u,x_i) = ||u - x_i||}{d(u,x[i])} is the Euclidean distance from \eqn{u} to \eqn{x_i}{x[i]}. The argument \code{X} must be a marked point pattern (object of class \code{"ppp"}, see \code{\link[spatstat.geom]{ppp.object}}). The points of the pattern are taken to be the observation locations \eqn{x_i}{x[i]}, and the marks of the pattern are taken to be the numeric values \eqn{v_i}{v[i]} observed at these locations. The marks are allowed to be a data frame. Then the smoothing procedure is applied to each column of marks. If \code{at="pixels"} (the default), the smoothed mark value is calculated at a grid of pixels, and the result is a pixel image. The arguments \code{\dots} control the pixel resolution. See \code{\link[spatstat.geom]{as.mask}}. If \code{at="points"}, the smoothed mark values are calculated at the data points only, using a leave-one-out rule (the mark value at a data point is excluded when calculating the smoothed value for that point). An estimate of standard error is also calculated, if \code{se=TRUE}. The calculation assumes that the data point locations are fixed, that is, the standard error only takes into account the variability in the mark values, and not the variability due to randomness of the data point locations. An alternative to inverse-distance weighting is kernel smoothing, which is performed by \code{\link{Smooth.ppp}}. } \value{ \emph{If \code{X} has a single column of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a pixel image (object of class \code{"im"}). Pixel values are values of the interpolated function. \item If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{X}. Entries are values of the interpolated function at the points of \code{X}. } \emph{If \code{X} has a data frame of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a named list of pixel images (object of class \code{"im"}). There is one image for each column of marks. This list also belongs to the class \code{"solist"}, for which there is a plot method. \item If \code{at="points"}, the result is a data frame with one row for each point of \code{X}, and one column for each column of marks. Entries are values of the interpolated function at the points of \code{X}. } If \code{se=TRUE}, then the result is a list with two entries named \code{estimate} and \code{SE}, which each have the format described above. } \seealso{ \code{\link{density.ppp}}, \code{\link[spatstat.geom]{ppp.object}}, \code{\link[spatstat.geom]{im.object}}. See \code{\link{Smooth.ppp}} for kernel smoothing, \code{\link{SpatialMedian.ppp}} for median smoothing and \code{\link[spatstat.geom]{nnmark}} for nearest-neighbour interpolation. To perform other kinds of interpolation, see also the \code{akima} package. } \examples{ # data frame of marks: trees marked by diameter and height plot(idw(finpines)) idw(finpines, at="points")[1:5,] plot(idw(finpines, se=TRUE)$SE) idw(finpines, at="points", se=TRUE)$SE[1:5, ] } \references{ Shepard, D. (1968) A two-dimensional interpolation function for irregularly-spaced data. 
\emph{Proceedings of the 1968 ACM National Conference}, 1968, pages 517--524. DOI: 10.1145/800186.810616 } \author{ \spatstatAuthors. Variance calculation by Andrew P Wheeler with modifications by Adrian Baddeley. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.explore/man/methods.rhohat.Rd0000644000176200001440000000660114611073324017567 0ustar liggesusers\name{methods.rhohat} \alias{methods.rhohat} %DoNotExport \alias{print.rhohat} \alias{plot.rhohat} \alias{predict.rhohat} \alias{simulate.rhohat} \title{ Methods for Intensity Functions of Spatial Covariate } \description{ These are methods for the class \code{"rhohat"}. } \usage{ \method{print}{rhohat}(x, ...) \method{plot}{rhohat}(x, ..., do.rug=TRUE) \method{predict}{rhohat}(object, ..., relative=FALSE, what=c("rho", "lo", "hi", "se")) \method{simulate}{rhohat}(object, nsim=1, ..., drop=TRUE) } \arguments{ \item{x,object}{ An object of class \code{"rhohat"} representing a smoothed estimate of the intensity function of a point process. } \item{\dots}{ Arguments passed to other methods. } \item{do.rug}{ Logical value indicating whether to plot the observed values of the covariate as a rug plot along the horizontal axis. } \item{relative}{ Logical value indicating whether to compute the estimated point process intensity (\code{relative=FALSE}) or the relative risk (\code{relative=TRUE}) in the case of a relative risk estimate. } \item{nsim}{ Number of simulations to be generated. } \item{drop}{ Logical value indicating what to do when \code{nsim=1}. If \code{drop=TRUE} (the default), a point pattern is returned. If \code{drop=FALSE}, a list of length 1 containing a point pattern is returned. } \item{what}{ Optional character string (partially matched) specifying which value should be calculated: either the function estimate (\code{what="rho"}, the default), the lower or upper end of the confidence interval (\code{what="lo"} or \code{what="hi"}) or the standard error (\code{what="se"}). } } \details{ These functions are methods for the generic commands \code{\link{print}}, \code{\link[graphics]{plot}}, \code{\link[stats]{predict}} and \code{\link[stats]{simulate}} for the class \code{"rhohat"}. An object of class \code{"rhohat"} is an estimate of the intensity of a point process, as a function of a given spatial covariate. See \code{\link{rhohat}}. The method \code{plot.rhohat} displays the estimated function \eqn{\rho}{rho} using \code{\link{plot.fv}}, and optionally adds a \code{\link{rug}} plot of the observed values of the covariate. The method \code{predict.rhohat} computes a pixel image of the intensity \eqn{\rho(Z(u))}{rho(Z(u))} at each spatial location \eqn{u}, where \eqn{Z} is the spatial covariate. The method \code{simulate.rhohat} invokes \code{predict.rhohat} to determine the predicted intensity, and then simulates a Poisson point process with this intensity. } \value{ For \code{predict.rhohat} the value is a pixel image (object of class \code{"im"} or \code{"linim"}). For \code{simulate.rhohat} the value is a point pattern (object of class \code{"ppp"} or \code{"lpp"}). For other functions, the value is \code{NULL}. 
} \author{ \adrian } \seealso{ \code{\link{rhohat}} } \examples{ X <- rpoispp(function(x,y){exp(3+3*x)}) rho <- rhohat(X, function(x,y){x}) rho plot(rho) Y <- predict(rho) plot(Y) plot(simulate(rho), add=TRUE) # if(require("spatstat.model")) { fit <- ppm(X, ~x) rho <- rhohat(fit, "y") opa <- par(mfrow=c(1,2)) plot(predict(rho)) plot(predict(rho, relative=TRUE)) par(opa) plot(predict(rho, what="se")) } } \keyword{spatial} \keyword{methods} spatstat.explore/man/quadrat.test.splitppp.Rd0000644000176200001440000000375714643125462021150 0ustar liggesusers\name{quadrat.test.splitppp} \alias{quadrat.test.splitppp} \title{Dispersion Test of CSR for Split Point Pattern Based on Quadrat Counts} \description{ Performs a test of Complete Spatial Randomness for each of the component patterns in a split point pattern, based on quadrat counts. By default performs chi-squared tests; can also perform Monte Carlo based tests. } \usage{ \method{quadrat.test}{splitppp}(X, ..., df=NULL, df.est=NULL, Xname=NULL) } \arguments{ \item{X}{ A split point pattern (object of class \code{"splitppp"}), each component of which will be subjected to the goodness-of-fit test. } \item{\dots}{Arguments passed to \code{\link{quadrat.test.ppp}}.} \item{df,df.est,Xname}{Arguments passed to \code{\link{pool.quadrattest}}.} } \details{ The function \code{quadrat.test} is generic, with methods for point patterns (class \code{"ppp"}), split point patterns (class \code{"splitppp"}) and point process models (class \code{"ppm"}). If \code{X} is a split point pattern, then for each of the component point patterns (taken separately) we test the null hypotheses of Complete Spatial Randomness, then combine the result into a single test. The method \code{quadrat.test.ppp} is applied to each component point pattern. Then the results are pooled using \code{\link{pool.quadrattest}} to obtain a single test. } \seealso{ \code{\link{quadrat.test}}, \code{\link[spatstat.geom]{quadratcount}}, \code{\link[spatstat.geom]{quadrats}}, \code{\link[spatstat.random]{quadratresample}}, \code{\link{chisq.test}}, \code{\link{cdf.test}}. To test a Poisson point process model against a specific Poisson alternative, use \code{\link[spatstat.model]{anova.ppm}}. } \value{ An object of class \code{"quadrattest"} which can be printed and plotted. } \examples{ qH <- quadrat.test(split(humberside), 2, 3) plot(qH) qH } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \concept{Test of randomness} \concept{Test of clustering} spatstat.explore/man/Lcross.inhom.Rd0000644000176200001440000001004214611073323017207 0ustar liggesusers\name{Lcross.inhom} \alias{Lcross.inhom} \title{ Inhomogeneous Cross Type L Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the cross-type \eqn{L} function. } \usage{ Lcross.inhom(X, i, j, \dots, correction) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{L} function \eqn{L_{ij}(r)}{Lij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). 
Defaults to the second level of \code{marks(X)}. } \item{correction,\dots}{ Other arguments passed to \code{\link{Kcross.inhom}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{L_{ij}(r)}{Lij(r)} has been estimated } \item{theo}{the theoretical value of \eqn{L_{ij}(r)}{Lij(r)} for a marked Poisson process, identically equal to \code{r} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{ij}(r)}{Lij(r)} obtained by the edge corrections named. } \details{ This is a generalisation of the function \code{\link{Lcross}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Linhom}}. All the arguments are passed to \code{\link{Kcross.inhom}}, which estimates the inhomogeneous multitype K function \eqn{K_{ij}(r)}{Kij(r)} for the point pattern. The resulting values are then transformed by taking \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. } \references{ \Moller, J. and Waagepetersen, R. Statistical Inference and Simulation for Spatial Point Processes Chapman and Hall/CRC Boca Raton, 2003. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Lcross}}, \code{\link{Linhom}}, \code{\link{Kcross.inhom}} } \examples{ # Lansing Woods data woods <- lansing \testonly{woods <- woods[seq(1,npoints(woods), by=10)]} ma <- split(woods)$maple wh <- split(woods)$whiteoak # method (1): estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdaW <- density.ppp(wh, sigma=0.15, at="points") L <- Lcross.inhom(woods, "whiteoak", "maple", lambdaW, lambdaM) # method (2): fit parametric intensity model if(require("spatstat.model")) { fit <- ppm(woods ~marks * polynom(x,y,2)) # evaluate fitted intensities at data points # (these are the intensities of the sub-processes of each type) inten <- fitted(fit, dataonly=TRUE) # split according to types of points lambda <- split(inten, marks(woods)) L <- Lcross.inhom(woods, "whiteoak", "maple", lambda$whiteoak, lambda$maple) } # synthetic example: type A points have intensity 50, # type B points have intensity 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) L <- Lcross.inhom(X, "A", "B", lambdaI=as.im(50, Window(X)), lambdaJ=lamB) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/bw.pcf.Rd0000644000176200001440000001204014611073323016010 0ustar liggesusers\name{bw.pcf} \alias{bw.pcf} \title{ Cross Validated Bandwidth Selection for Pair Correlation Function } \description{ Uses composite likelihood or generalized least squares cross-validation to select a smoothing bandwidth for the kernel estimation of pair correlation function. } \usage{ bw.pcf(X, rmax=NULL, lambda=NULL, divisor="r", kernel="epanechnikov", nr=10000, bias.correct=TRUE, cv.method=c("compLik", "leastSQ"), simple=TRUE, srange=NULL, \dots, verbose=FALSE, warn=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). 
} \item{rmax}{ Numeric. Maximum value of the spatial lag distance \eqn{r} for which \eqn{g(r)} should be evaluated. } \item{lambda}{ Optional. Values of the estimated intensity function. A vector giving the intensity values at the points of the pattern \code{X}. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See \code{pcf.ppp}. } \item{kernel}{ Choice of smoothing kernel, passed to \code{density}; see \code{\link{pcf}} and \code{\link{pcfinhom}}. } \item{nr}{ Integer. Number of subintervals for discretization of [0, rmax] to use in computing numerical integrals. } \item{bias.correct}{ Logical. Whether to use bias corrected version of the kernel estimate. See Details. } \item{cv.method}{ Choice of cross validation method: either \code{"compLik"} or \code{"leastSQ"} (partially matched). } \item{simple}{ Logical. Whether to use simple removal of spatial lag distances. See Details. } \item{srange}{ Optional. Numeric vector of length 2 giving the range of bandwidth values that should be searched to find the optimum bandwidth. } \item{\dots}{ Other arguments, passed to \code{\link{pcf}} or \code{\link{pcfinhom}}. } \item{verbose}{ Logical value indicating whether to print progress reports during the optimization procedure. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the optimum value of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth \code{bw} for the kernel estimator of the pair correlation function of a point process intensity computed by \code{\link{pcf.ppp}} (homogeneous case) or \code{\link{pcfinhom}} (inhomogeneous case). With \code{cv.method="leastSQ"}, the bandwidth \eqn{h} is chosen to minimise an unbiased estimate of the integrated mean-square error criterion \eqn{M(h)} defined in equation (4) in Guan (2007a). The code implements the fast algorithm of Jalilian and Waagepetersen (2018). With \code{cv.method="compLik"}, the bandwidth \eqn{h} is chosen to maximise a likelihood cross-validation criterion \eqn{CV(h)} defined in equation (6) of Guan (2007b). \deqn{ M(b) = \frac{\mbox{MSE}(\sigma)}{\lambda^2} - g(0) }{ M(b) = \int_{0}^{rmax} \hat{g}^2(r;b) r dr - \sum_{u,v} } The result is a numerical value giving the selected bandwidth. } \section{Definition of bandwidth}{ The bandwidth \code{bw} returned by \code{bw.pcf} is the standard deviation of the smoothing kernel, following the standard convention in \R. As mentioned in the documentation for \code{\link{density.default}} and \code{\link{pcf.ppp}}, this differs from other definitions of bandwidth that can be found in the literature. The scale parameter \code{h}, which is called the bandwidth in some literature, is defined differently. For example for the Epanechnikov kernel, \code{h} is the half-width of the kernel, and \code{bw=h/sqrt(5)}. } \value{ A single numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} (see \code{\link[spatstat.explore]{bw.optim.object}}) which can be plotted to show the bandwidth selection criterion as a function of \code{sigma}. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcfinhom}}, \code{\link[spatstat.explore]{bw.optim.object}} } \examples{ b <- bw.pcf(redwood) plot(pcf(redwood, bw=b)) } \references{ Guan, Y. (2007a). A composite likelihood cross-validation approach in selecting bandwidth for the estimation of the pair correlation function. 
\emph{Scandinavian Journal of Statistics}, \bold{34}(2), 336--346. Guan, Y. (2007b). A least-squares cross-validation bandwidth selection approach in pair correlation function estimations. \emph{Statistics & Probability Letters}, \bold{77}(18), 1722--1729. Jalilian, A. and Waagepetersen, R. (2018) Fast bandwidth selection for estimation of the pair correlation function. \emph{Journal of Statistical Computation and Simulation}, \bold{88}(10), 2001--2011. \url{https://www.tandfonline.com/doi/full/10.1080/00949655.2018.1428606} } \author{ Rasmus Waagepetersen and Abdollah Jalilian. Adapted for \pkg{spatstat} by \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} \concept{Bandwidth selection} spatstat.explore/man/adaptive.density.Rd0000644000176200001440000000402514643125461020116 0ustar liggesusers\name{adaptive.density} \alias{adaptive.density} \title{Adaptive Estimate of Intensity of Point Pattern} \description{ Computes an adaptive estimate of the intensity function of a point pattern. } \usage{ adaptive.density(X, \dots, method=c("voronoi","kernel", "nearest")) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"} or \code{"lpp"}).} \item{method}{Character string specifying the estimation method} \item{\dots}{ Additional arguments passed to \code{\link{densityVoronoi}}, \code{\link{densityAdaptiveKernel.ppp}} or \code{\link{nndensity.ppp}}. } } \details{ This function is an alternative to \code{\link{density.ppp}} and \code{\link[spatstat.linnet]{density.lpp}}. It computes an estimate of the intensity function of a point pattern dataset. The result is a pixel image giving the estimated intensity. If \code{method="voronoi"} the data are passed to the function \code{\link{densityVoronoi}} which estimates the intensity using the Voronoi-Dirichlet tessellation. If \code{method="kernel"} the data are passed to the function \code{\link{densityAdaptiveKernel.ppp}} which estimates the intensity using a variable-bandwidth kernel estimator. (This is not yet supported when \code{X} has class \code{"lpp"}.) If \code{method="nearest"} the data are passed to the function \code{\link{nndensity.ppp}} which estimates the intensity using the distance to the \code{k}-th nearest data point. (This is not yet supported when \code{X} has class \code{"lpp"}.) } \value{ A pixel image (object of class \code{"im"} or \code{"linim"}) whose values are estimates of the intensity of \code{X}. } \seealso{ \code{\link{density.ppp}}, \code{\link{densityVoronoi}}, \code{\link{densityAdaptiveKernel.ppp}}, \code{\link{nndensity.ppp}}, \code{\link[spatstat.geom]{im.object}}. } \examples{ plot(adaptive.density(nztrees, 1), main="Voronoi estimate") } \author{ \spatstatAuthors and \mehdi. } \keyword{spatial} \keyword{methods} \keyword{smooth} \concept{Adaptive smoothing} spatstat.explore/man/formula.fv.Rd0000644000176200001440000000357614611073324016727 0ustar liggesusers\name{formula.fv} \alias{formula.fv} \alias{formula<-} \alias{formula<-.fv} \title{ Extract or Change the Plot Formula for a Function Value Table } \description{ Extract or change the default plotting formula for an object of class \code{"fv"} (function value table). } \usage{ \method{formula}{fv}(x, \dots) formula(x, \dots) <- value \method{formula}{fv}(x, \dots) <- value } \arguments{ \item{x}{ An object of class \code{"fv"}, containing the values of several estimates of a function. } \item{\dots}{ Arguments passed to other methods. } \item{value}{ New value of the formula. Either a \code{formula} or a character string. 
} } \details{ A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}) is a convenient way of storing and plotting several different estimates of the same function. The default behaviour of \code{plot(x)} for a function value table \code{x} is determined by a formula associated with \code{x} called its \emph{plot formula}. See \code{\link{plot.fv}} for explanation about these formulae. The function \code{formula.fv} is a method for the generic command \code{\link{formula}}. It extracts the plot formula associated with the object. The function \code{formula<-} is generic. It changes the formula associated with an object. The function \code{formula<-.fv} is the method for \code{formula<-} for the class \code{"fv"}. It changes the plot formula associated with the object. } \value{ The result of \code{formula.fv} is a character string containing the plot formula. The result of \code{formula<-.fv} is a new object of class \code{"fv"}. } \author{ \adrian and \rolf } \seealso{ \code{\link{fv}}, \code{\link{plot.fv}}, \code{\link[stats]{formula}}. } \examples{ K <- Kest(cells) formula(K) formula(K) <- (iso ~ r) } \keyword{spatial} \keyword{methods} spatstat.explore/man/Ldot.inhom.Rd0000644000176200001440000000654314611073323016657 0ustar liggesusers\name{Ldot.inhom} \alias{Ldot.inhom} \title{ Inhomogeneous Multitype L Dot Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the dot \eqn{L} function. } \usage{ Ldot.inhom(X, i, \dots, correction) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{L} function \eqn{L_{i\bullet}(r)}{Li.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{correction,\dots}{ Other arguments passed to \code{\link{Kdot.inhom}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{L_{i\bullet}(r)}{Li.(r)} has been estimated } \item{theo}{the theoretical value of \eqn{L_{i\bullet}(r)}{Li.(r)} for a marked Poisson process, identical to \eqn{r}. } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{i\bullet}(r)}{Li.(r)} obtained by the edge corrections named. } \details{ This a generalisation of the function \code{\link{Ldot}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Linhom}}. All the arguments are passed to \code{\link{Kdot.inhom}}, which estimates the inhomogeneous multitype K function \eqn{K_{i\bullet}(r)}{Ki.(r)} for the point pattern. The resulting values are then transformed by taking \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. } \references{ \Moller, J. and Waagepetersen, R. Statistical Inference and Simulation for Spatial Point Processes Chapman and Hall/CRC Boca Raton, 2003. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. 
The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Ldot}}, \code{\link{Linhom}}, \code{\link{Kdot.inhom}}, \code{\link{Lcross.inhom}}. } \examples{ # Lansing Woods data lan <- lansing lan <- lan[seq(1,npoints(lan), by=10)] ma <- split(lan)$maple lg <- unmark(lan) # Estimate intensities by nonparametric smoothing lambdaM <- density(ma, sigma=0.15, at="points") lambdadot <- density(lg, sigma=0.15, at="points") L <- Ldot.inhom(lan, "maple", lambdaI=lambdaM, lambdadot=lambdadot) # synthetic example: type A points have intensity 50, # type B points have intensity 50 + 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) lamdot <- as.im(function(x,y) { 100 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) L <- Ldot.inhom(X, "B", lambdaI=lamB, lambdadot=lamdot) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/SpatialMedian.ppp.Rd0000644000176200001440000001161714611073325020155 0ustar liggesusers\name{SpatialMedian.ppp} \alias{SpatialMedian.ppp} \title{ Spatially Weighted Median of Values at Points } \description{ Given a spatial point pattern with numeric marks, compute a weighted median of the mark values, with spatially-varying weights that depend on distance to the data points. } \usage{ \method{SpatialMedian}{ppp}(X, sigma = NULL, \dots, type = 4, at = c("pixels", "points"), leaveoneout = TRUE, weights = NULL, edge = TRUE, diggle = FALSE, verbose = FALSE) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}) with numeric marks. } \item{sigma}{ Smoothing bandwidth, passed to \code{\link{density.ppp}}. } \item{\dots}{ Further arguments passed to \code{\link{density.ppp}} controlling the spatial smoothing. } \item{type}{ Integer specifying the type of median (using the convention of \code{\link[stats]{quantile.default}}; see Details). Only types 1 and 4 are currently implemented. } \item{at}{ Character string indicating whether to compute the median at every pixel of a pixel image (\code{at="pixels"}, the default) or at every data point of \code{X} (\code{at="points"}). } \item{leaveoneout}{ Logical value indicating whether to compute a leave-one-out estimator. Applicable only when \code{at="points"}. } \item{weights}{ Optional vector of numeric weights attached to the points of \code{X}. } \item{edge,diggle}{ Arguments passed to \code{\link{density.ppp}} to determine the edge correction. } \item{verbose}{ Logical value specifying whether to print progress reports during the calculation. } } \details{ The argument \code{X} should be a spatial point pattern (object of class \code{"ppp"}) with numeric marks. The algorithm computes the weighted median of the mark values at each desired spatial location, using spatially-varying weights which depend on distance to the data points. Suppose the data points are at spatial locations \eqn{x_1,\ldots,x_n}{x[1], ..., x[n]} and have mark values \eqn{y_1,\ldots,y_n}{y[1], ..., y[n]}. For a query location \eqn{u}, the smoothed median is defined as the weighted median of the mark values \eqn{y_1,\ldots,y_n}{y[1], ..., y[n]} with weights \eqn{w_1,\ldots,w_n}{w[1], ..., w[n]}, where \deqn{ w_i = \frac{k(u,x_i)}{\sum_{j=1}^n k(u,x_j)} }{ w[i] = k(u,x[i])/(k(u, x[1]) + ... 
+ k(u, x[n])) } where \eqn{k(u,v)} is the smoothing kernel with bandwidth \code{sigma} If \code{at="points"} and \code{leaveoneout=TRUE}, then a leave-one-out calculation is performed, which means that when the query location is a data point \eqn{x_i}{x[i]}, the value at the data point is ignored, and the weighted median is computed from the values \eqn{y_j}{y[j]} for all \eqn{j} not equal to \eqn{i}. } \value{ \emph{If \code{X} has a single column of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a pixel image (object of class \code{"im"}). \item If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{X}. } \emph{If \code{X} has a data frame of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a named list of pixel images (object of class \code{"im"}). There is one image for each column of marks. This list also belongs to the class \code{"solist"}, for which there is a plot method. \item If \code{at="points"}, the result is a data frame with one row for each point of \code{X}, and one column for each column of marks. Entries are values of the interpolated function at the points of \code{X}. } The return value has attributes \code{"sigma"} and \code{"varcov"} which report the smoothing bandwidth that was used. The calculation of the median value depends on the argument \code{type} which is interpreted in the same way as for \code{\link[stats]{quantile.default}}. Currently, only types 1 and 4 are implemented. If \code{type=1}, the median is always one of the mark values (one of the values in \code{marks(x)}). If \code{type=4} (the default), the median value is obtained by linearly interpolating between mark values. Note that the default values of \code{type} in \code{SpatialMedian.ppp} and \code{\link{SpatialQuantile.ppp}} are different. } \author{ \adrian. } \seealso{ Generic function \code{\link{SpatialMedian}}. \code{\link{SpatialQuantile}} and \code{\link{SpatialQuantile.ppp}} for other quantiles. \code{\link{Smooth.ppp}} for the spatially weighted average. } \examples{ X <- longleaf if(!interactive()) { ## mark values rounded to nearest multiple of 10 to reduce check time marks(X) <- round(marks(X), -1) } Z <- SpatialMedian(X, sigma=30) ZX <- SpatialMedian(X, sigma=30, at="points") } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.explore/man/sdr.Rd0000644000176200001440000000712014611073325015426 0ustar liggesusers\name{sdr} \alias{sdr} \alias{sdr.ppp} \title{ Sufficient Dimension Reduction } \description{ Given a point pattern and a set of predictors, find a minimal set of new predictors, each constructed as a linear combination of the original predictors. } \usage{ sdr(X, covariates, \dots) \method{sdr}{ppp}(X, covariates, method = c("DR", "NNIR", "SAVE", "SIR", "TSE"), Dim1 = 1, Dim2 = 1, predict=FALSE, \dots) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{covariates}{ A list of pixel images (objects of class \code{"im"}) to serve as predictor variables. } \item{method}{ Character string indicating which method to use. See Details. } \item{Dim1}{ Dimension of the first order Central Intensity Subspace (applicable when \code{method} is \code{"DR"}, \code{"NNIR"}, \code{"SAVE"} or \code{"TSE"}). } \item{Dim2}{ Dimension of the second order Central Intensity Subspace (applicable when \code{method="TSE"}). } \item{predict}{ Logical value indicating whether to compute the new predictors as well. 
} \item{\dots}{ Additional arguments (ignored by \code{sdr.ppp}). } } \details{ Given a point pattern \eqn{X} and predictor variables \eqn{Z_1, \dots, Z_p}{Z[1], ..., Z[p]}, Sufficient Dimension Reduction methods (Guan and Wang, 2010) attempt to find a minimal set of new predictor variables, each constructed by taking a linear combination of the original predictors, which explain the dependence of \eqn{X} on \eqn{Z_1, \dots, Z_p}{Z[1], ..., Z[p]}. The methods do not assume any particular form of dependence of the point pattern on the predictors. The predictors are assumed to be Gaussian random fields. Available methods are: \tabular{ll}{ \code{method="DR"} \tab directional regression \cr \code{method="NNIR"} \tab nearest neighbour inverse regression \cr \code{method="SAVE"} \tab sliced average variance estimation \cr \code{method="SIR"} \tab sliced inverse regression \cr \code{method="TSE"} \tab two-step estimation \cr } The result includes a matrix \code{B} whose columns are estimates of the basis vectors of the space of new predictors. That is, the \code{j}th column of \code{B} expresses the \code{j}th new predictor as a linear combination of the original predictors. If \code{predict=TRUE}, the new predictors are also evaluated. They can also be evaluated using \code{\link{sdrPredict}}. } \value{ A list with components \code{B, M} or \code{B, M1, M2} where \code{B} is a matrix whose columns are estimates of the basis vectors for the space, and \code{M} or \code{M1,M2} are matrices containing estimates of the kernel. If \code{predict=TRUE}, the result also includes a component \code{Y} which is a list of pixel images giving the values of the new predictors. } \examples{ A <- sdr(bei, bei.extra, predict=TRUE) A Y1 <- A$Y[[1]] plot(Y1) points(bei, pch=".", cex=2) # investigate likely form of dependence plot(rhohat(bei, Y1)) } \seealso{ \code{\link{sdrPredict}} to compute the new predictors from the coefficient matrix. \code{\link{dimhat}} to estimate the subspace dimension. \code{\link{subspaceDistance}} } \references{ Guan, Y. and Wang, H. (2010) Sufficient dimension reduction for spatial point processes directed by Gaussian random fields. \emph{Journal of the Royal Statistical Society, Series B}, \bold{72}, 367--387. } \author{ Matlab original by Yongtao Guan, translated to \R by Suman Rakshit. } \keyword{spatial} \keyword{multivariate} spatstat.explore/man/hopskel.Rd0000644000176200001440000000630614643125461016314 0ustar liggesusers\name{hopskel} \alias{hopskel} \alias{hopskel.test} \title{Hopkins-Skellam Test} \description{ Perform the Hopkins-Skellam test of Complete Spatial Randomness, or simply calculate the test statistic. } \usage{ hopskel(X) hopskel.test(X, \dots, alternative=c("two.sided", "less", "greater", "clustered", "regular"), method=c("asymptotic", "MonteCarlo"), nsim=999) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{alternative}{ String indicating the type of alternative for the hypothesis test. Partially matched. } \item{method}{ Method of performing the test. Partially matched. } \item{nsim}{ Number of Monte Carlo simulations to perform, if a Monte Carlo p-value is required. } \item{\dots}{Ignored.} } \details{ Hopkins and Skellam (1954) proposed a test of Complete Spatial Randomness based on comparing nearest-neighbour distances with point-event distances. 
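The quantities involved are defined in the following paragraph; as a sketch, the test statistic can be computed by hand along these lines (assuming the redwood data from spatstat.data and runifpoint from spatstat.random):

  X <- redwood
  U <- runifpoint(npoints(X), win = Window(X))  # a completely random pattern
  P <- nndist(X)            # nearest-neighbour distances within X
  I <- nncross(U, X)$dist   # distances from points of U to the nearest point of X
  A <- sum(P^2)/sum(I^2)    # the Hopkins-Skellam statistic
  A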
If the point pattern \code{X} contains \code{n} points, we first compute the nearest-neighbour distances \eqn{P_1, \ldots, P_n}{P[1], ..., P[n]} so that \eqn{P_i}{P[i]} is the distance from the \eqn{i}th data point to the nearest other data point. Then we generate another completely random pattern \code{U} with the same number \code{n} of points, and compute for each point of \code{U} the distance to the nearest point of \code{X}, giving distances \eqn{I_1, \ldots, I_n}{I[1], ..., I[n]}. The test statistic is \deqn{ A = \frac{\sum_i P_i^2}{\sum_i I_i^2} }{ A = (sum[i] P[i]^2) / (sum[i] I[i]^2) } The null distribution of \eqn{A} is roughly an \eqn{F} distribution with shape parameters \eqn{(2n,2n)}. (This is equivalent to using the test statistic \eqn{H=A/(1+A)} and referring \eqn{H} to the Beta distribution with parameters \eqn{(n,n)}). The function \code{hopskel} calculates the Hopkins-Skellam test statistic \eqn{A}, and returns its numeric value. This can be used as a simple summary of spatial pattern: the value \eqn{H=1} is consistent with Complete Spatial Randomness, while values \eqn{H < 1} are consistent with spatial clustering, and values \eqn{H > 1} are consistent with spatial regularity. The function \code{hopskel.test} performs the test. If \code{method="asymptotic"} (the default), the test statistic \eqn{H} is referred to the \eqn{F} distribution. If \code{method="MonteCarlo"}, a Monte Carlo test is performed using \code{nsim} simulated point patterns. } \value{ The value of \code{hopskel} is a single number. The value of \code{hopskel.test} is an object of class \code{"htest"} representing the outcome of the test. It can be printed. } \references{ Hopkins, B. and Skellam, J.G. (1954) A new method of determining the type of distribution of plant individuals. \emph{Annals of Botany} \bold{18}, 213--227. } \seealso{ \code{\link{clarkevans}}, \code{\link{clarkevans.test}}, \code{\link[spatstat.geom]{nndist}}, \code{\link[spatstat.geom]{nncross}} } \examples{ hopskel(redwood) hopskel.test(redwood, alternative="clustered") } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} \keyword{htest} spatstat.explore/man/edge.Ripley.Rd0000644000176200001440000000610014611073324017001 0ustar liggesusers\name{edge.Ripley} \alias{edge.Ripley} \alias{rmax.Ripley} \title{ Ripley's Isotropic Edge Correction } \description{ Computes Ripley's isotropic edge correction weights for a point pattern. } \usage{ edge.Ripley(X, r, W = Window(X), method = c("C", "interpreted"), maxweight = 100, internal=list()) rmax.Ripley(W) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{W}{ Window for which the edge correction is required. } \item{r}{ Vector or matrix of interpoint distances for which the edge correction should be computed. } \item{method}{ Choice of algorithm. Either \code{"interpreted"} or \code{"C"}. This is needed only for debugging purposes. } \item{maxweight}{ Maximum permitted value of the edge correction weight. } \item{internal}{For developer use only.} } \details{ The function \code{edge.Ripley} computes Ripley's (1977) isotropic edge correction weight, which is used in estimating the \eqn{K} function and in many other contexts. The function \code{rmax.Ripley} computes the maximum value of distance \eqn{r} for which the isotropic edge correction estimate of \eqn{K(r)} is valid. 
For a single point \eqn{x} in a window \eqn{W}, and a distance \eqn{r > 0}, the isotropic edge correction weight is \deqn{ e(u, r) = \frac{2\pi r}{\mbox{length}(c(u,r) \cap W)} }{ e(u, r) = 2 * \pi * r/length(intersection(c(u,r), W)) } where \eqn{c(u,r)} is the circle of radius \eqn{r} centred at the point \eqn{u}. The denominator is the length of the overlap between this circle and the window \eqn{W}. The function \code{edge.Ripley} computes this edge correction weight for each point in the point pattern \code{X} and for each corresponding distance value in the vector or matrix \code{r}. If \code{r} is a vector, with one entry for each point in \code{X}, then the result is a vector containing the edge correction weights \code{e(X[i], r[i])} for each \code{i}. If \code{r} is a matrix, with one row for each point in \code{X}, then the result is a matrix whose \code{i,j} entry gives the edge correction weight \code{e(X[i], r[i,j])}. For example \code{edge.Ripley(X, pairdist(X))} computes all the edge corrections required for the \eqn{K}-function. If any value of the edge correction weight exceeds \code{maxwt}, it is set to \code{maxwt}. The function \code{rmax.Ripley} computes the smallest distance \eqn{r} such that it is possible to draw a circle of radius \eqn{r}, centred at a point of \code{W}, such that the circle does not intersect the interior of \code{W}. } \value{ A numeric vector or matrix. } \references{ Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \seealso{ \code{\link{edge.Trans}}, \code{\link{rmax.Trans}}, \code{\link{Kest}} } \examples{ v <- edge.Ripley(cells, pairdist(cells)) rmax.Ripley(Window(cells)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/markvario.Rd0000644000176200001440000000717714643125461016651 0ustar liggesusers\name{markvario} \alias{markvario} \title{Mark Variogram} \description{ Estimate the mark variogram of a marked point pattern. } \usage{ markvario(X, correction = c("isotropic", "Ripley", "translate"), r = NULL, method = "density", ..., normalise=FALSE) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link[spatstat.geom]{as.ppp}}. It must have marks which are numeric. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the mark variogram \eqn{\gamma(r)}{gamma(r)} should be evaluated. There is a sensible default. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Other arguments passed to \code{\link{markcorr}}, or passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{normalise}{If \code{TRUE}, normalise the variogram by dividing it by the estimated mark variance. } } \details{ The mark variogram \eqn{\gamma(r)}{gamma(r)} of a marked point process \eqn{X} is a measure of the dependence between the marks of two points of the process a distance \eqn{r} apart. 
It is informally defined as \deqn{ \gamma(r) = E[\frac 1 2 (M_1 - M_2)^2] }{ gamma(r) = E[(1/2) * (M1 - M2)^2 ] } where \eqn{E[ ]} denotes expectation and \eqn{M_1,M_2}{M1,M2} are the marks attached to two points of the process a distance \eqn{r} apart. The mark variogram of a marked point process is analogous, but \bold{not equivalent}, to the variogram of a random field in geostatistics. See Waelder and Stoyan (1996). } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark variogram \eqn{\gamma(r)}{gamma(r)} has been estimated } \item{theo}{the theoretical value of \eqn{\gamma(r)}{gamma(r)} when the marks attached to different points are independent; equal to the sample variance of the marks } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{\gamma(r)}{gamma(r)} obtained by the edge corrections named. } \references{ Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Mase, S. (1996) The threshold method for estimating annual rainfall. \emph{Annals of the Institute of Statistical Mathematics} \bold{48} (1996) 201-213. Waelder, O. and Stoyan, D. (1996) On variograms in point process statistics. \emph{Biometrical Journal} \bold{38} (1996) 895-905. } \seealso{ Mark correlation function \code{\link{markcorr}} for numeric marks. Mark connection function \code{\link{markconnect}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}} for factor-valued marks. } \examples{ # Longleaf Pine data # marks represent tree diameter # Subset of this large pattern swcorner <- owin(c(0,100),c(0,100)) sub <- longleaf[ , swcorner] # mark correlation function mv <- markvario(sub) plot(mv) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Lest.Rd0000644000176200001440000000550214643125461015553 0ustar liggesusers\name{Lest} \alias{Lest} \title{L-function} \description{ Calculates an estimate of the \eqn{L}-function (Besag's transformation of Ripley's \eqn{K}-function) for a spatial point pattern. } \usage{ Lest(X, ..., correction) } \arguments{ \item{X}{ The observed point pattern, from which an estimate of \eqn{L(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link[spatstat.geom]{as.ppp}()}. } \item{correction,\dots}{ Other arguments passed to \code{\link{Kest}} to control the estimation procedure. } } \details{ This command computes an estimate of the \eqn{L}-function for the spatial point pattern \code{X}. The \eqn{L}-function is a transformation of Ripley's \eqn{K}-function, \deqn{L(r) = \sqrt{\frac{K(r)}{\pi}}}{L(r) = sqrt(K(r)/pi)} where \eqn{K(r)} is the \eqn{K}-function. See \code{\link{Kest}} for information about Ripley's \eqn{K}-function. The transformation to \eqn{L} was proposed by Besag (1977). The command \code{Lest} first calls \code{\link{Kest}} to compute the estimate of the \eqn{K}-function, and then applies the square root transformation. For a completely random (uniform Poisson) point pattern, the theoretical value of the \eqn{L}-function is \eqn{L(r) = r}. The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L(r)} is more appropriate for use in simulation envelopes and hypothesis tests. See \code{\link{Kest}} for the list of arguments. 
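Since Lest simply transforms the result of Kest, the relationship can be verified directly (a sketch using the cells data from the example below):

  K <- Kest(cells)
  L <- Lest(cells)
  ## each edge-corrected estimate of L is the square root of K/pi
  all.equal(L$iso, sqrt(K$iso/pi))
  all.equal(L$trans, sqrt(K$trans/pi))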
} \section{Variance approximations}{ If the argument \code{var.approx=TRUE} is given, the return value includes columns \code{rip} and \code{ls} containing approximations to the variance of \eqn{\hat L(r)}{Lest(r)} under CSR. These are obtained by the delta method from the variance approximations described in \code{\link{Kest}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L} has been estimated } \item{theo}{the theoretical value \eqn{L(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L(r)} obtained by the edge corrections named. } \references{ Besag, J. (1977) Discussion of Dr Ripley's paper. \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 193--195. } \seealso{ \code{\link{Kest}}, \code{\link{pcf}} } \examples{ L <- Lest(cells) plot(L, main="L function for cells") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/K3est.Rd0000644000176200001440000000732314643125461015640 0ustar liggesusers\name{K3est} \Rdversion{1.1} \alias{K3est} \title{ K-function of a Three-Dimensional Point Pattern } \description{ Estimates the \eqn{K}-function from a three-dimensional point pattern. } \usage{ K3est(X, \dots, rmax = NULL, nrval = 128, correction = c("translation", "isotropic"), ratio=FALSE) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{K_3(r)}{K3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{K_3(r)}{K3(r)} will be estimated. A large value of \code{nrval} is required to avoid discretisation effects. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the three-dimensional \eqn{K} function is \deqn{ K_3(r) = \frac 1 \lambda E(N(\Phi, x, r) \mid x \in \Phi) }{ K3(r) = (1/lambda) E(N(Phi,x,r) | x in Phi) } where \eqn{\lambda}{lambda} is the intensity of the process (the expected number of points per unit volume) and \eqn{N(\Phi,x,r)}{N(Phi,x,r)} is the number of points of \eqn{\Phi}{Phi}, other than \eqn{x} itself, which fall within a distance \eqn{r} of \eqn{x}. This is the three-dimensional generalisation of Ripley's \eqn{K} function for two-dimensional point processes (Ripley, 1977). The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The distance between each pair of distinct points is computed. The empirical cumulative distribution function of these values, with appropriate edge corrections, is renormalised to give the estimate of \eqn{K_3(r)}{K3(r)}. 
The available edge corrections are: \describe{ \item{\code{"translation"}:}{ the Ohser translation correction estimator (Ohser, 1983; Baddeley et al, 1993) } \item{\code{"isotropic"}:}{ the three-dimensional counterpart of Ripley's isotropic edge correction (Ripley, 1977; Baddeley et al, 1993). } } Alternatively \code{correction="all"} selects all options. } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{ \adrian and Rana Moyeed. } \seealso{ \code{\link[spatstat.geom]{pp3}} to create a three-dimensional point pattern (object of class \code{"pp3"}). \code{\link{pcf3est}}, \code{\link{F3est}}, \code{\link{G3est}} for other summary functions of a three-dimensional point pattern. \code{\link{Kest}} to estimate the \eqn{K}-function of point patterns in two dimensions or other spaces. } \examples{ X <- rpoispp3(42) Z <- K3est(X) if(interactive()) plot(Z) } \keyword{spatial} \keyword{nonparametric} \concept{Three-dimensional} spatstat.explore/man/methods.rho2hat.Rd0000644000176200001440000000412414611073324017647 0ustar liggesusers\name{methods.rho2hat} \alias{methods.rho2hat} %DoNotExport \alias{predict.rho2hat} \alias{print.rho2hat} \alias{plot.rho2hat} \title{ Methods for Intensity Functions of Two Spatial Covariates } \description{ These are methods for the class \code{"rho2hat"}. } \usage{ \method{plot}{rho2hat}(x, \dots, do.points=FALSE) \method{print}{rho2hat}(x, \dots) \method{predict}{rho2hat}(object, \dots, relative=FALSE) } \arguments{ \item{x,object}{ An object of class \code{"rho2hat"}. } \item{\dots}{ Arguments passed to other methods. } \item{do.points}{ Logical value indicating whether to plot the observed values of the covariates at the data points. } \item{relative}{ Logical value indicating whether to compute the estimated point process intensity (\code{relative=FALSE}) or the relative risk (\code{relative=TRUE}) in the case of a relative risk estimate. } } \details{ These functions are methods for the generic commands \code{\link{print}}, \code{\link{predict}} and \code{\link{plot}} for the class \code{"rho2hat"}. An object of class \code{"rho2hat"} is an estimate of the intensity of a point process, as a function of two given spatial covariates. See \code{\link{rho2hat}}. The method \code{plot.rho2hat} displays the estimated function \eqn{\rho}{rho} using \code{\link{plot.fv}}, and optionally adds a \code{\link{rug}} plot of the observed values of the covariate. In this plot the two axes represent possible values of the two covariates. The method \code{predict.rho2hat} computes a pixel image of the intensity \eqn{\rho(Z_1(u), Z_2(u))}{rho(Z1(u), Z2(u))} at each spatial location \eqn{u}, where \eqn{Z_1(u)}{Z1(u)} and \eqn{Z_2(u)}{Z2(u)} are the two spatial covariates. } \value{ For \code{predict.rho2hat} the value is a pixel image (object of class \code{"im"}). For other functions, the value is \code{NULL}. 
} \author{ \adrian } \seealso{ \code{\link{rho2hat}} } \examples{ r2 <- with(bei.extra, rho2hat(bei, elev, grad)) r2 plot(r2) plot(predict(r2)) } \keyword{spatial} \keyword{methods} spatstat.explore/man/Kinhom.Rd0000644000176200001440000003556414643125461016104 0ustar liggesusers\name{Kinhom} \alias{Kinhom} \title{Inhomogeneous K-function} \description{ Estimates the inhomogeneous \eqn{K} function of a non-stationary point pattern. } \usage{ Kinhom(X, lambda=NULL, \dots, r = NULL, breaks = NULL, correction=c("border", "bord.modif", "isotropic", "translate"), renormalise=TRUE, normpower=1, update=TRUE, leaveoneout=TRUE, nlarge = 1000, lambda2=NULL, reciplambda=NULL, reciplambda2=NULL, diagonal=TRUE, sigma=NULL, varcov=NULL, ratio=FALSE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{K} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link[spatstat.geom]{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Extra arguments. Ignored if \code{lambda} is present. Passed to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{K} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{renormalise}{ Logical. Whether to renormalise the estimate. See Details. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. } \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link[spatstat.model]{update.ppm}} or \code{\link[spatstat.model]{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link[spatstat.model]{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } \item{nlarge}{ Optional. Efficiency threshold. If the number of points exceeds \code{nlarge}, then only the border correction will be computed, using a fast algorithm. } \item{lambda2}{ Advanced use only. Matrix containing estimates of the products \eqn{\lambda(x_i)\lambda(x_j)}{lambda(x[i]) * lambda(x[j])} of the intensities at each pair of data points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}. } \item{reciplambda}{ Alternative to \code{lambda}. Values of the estimated \emph{reciprocal} \eqn{1/\lambda}{1/lambda} of the intensity function. 
Either a vector giving the reciprocal intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the reciprocal intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the reciprocal intensity value at any location. } \item{reciplambda2}{ Advanced use only. Alternative to \code{lambda2}. A matrix giving values of the estimated \emph{reciprocal products} \eqn{1/\lambda(x_i)\lambda(x_j)}{1/(lambda(x[i]) * lambda(x[j]))} of the intensities at each pair of data points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}. } \item{diagonal}{ Do not use this argument. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing at least the following columns, \item{r}{the vector of values of the argument \eqn{r} at which \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} has been estimated } \item{theo}{vector of values of \eqn{\pi r^2}{pi * r^2}, the theoretical value of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} for an inhomogeneous Poisson process } and containing additional columns according to the choice specified in the \code{correction} argument. The additional columns are named \code{border}, \code{trans} and \code{iso} and give the estimated values of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} using the border correction, translation correction, and Ripley isotropic correction, respectively. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)}. } \details{ This computes a generalisation of the \eqn{K} function for inhomogeneous point patterns, proposed by Baddeley, \Moller and Waagepetersen (2000). The ``ordinary'' \eqn{K} function (variously known as the reduced second order moment function and Ripley's \eqn{K} function), is described under \code{\link{Kest}}. It is defined only for stationary point processes. The inhomogeneous \eqn{K} function \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} is a direct generalisation to nonstationary point processes. Suppose \eqn{x} is a point process with non-constant intensity \eqn{\lambda(u)}{lambda(u)} at each location \eqn{u}. Define \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} to be the expected value, given that \eqn{u} is a point of \eqn{x}, of the sum of all terms \eqn{1/\lambda(x_j)}{1/lambda(x[j])} over all points \eqn{x_j}{x[j]} in the process separated from \eqn{u} by a distance less than \eqn{r}. This reduces to the ordinary \eqn{K} function if \eqn{\lambda()}{lambda()} is constant. If \eqn{x} is an inhomogeneous Poisson process with intensity function \eqn{\lambda(u)}{lambda(u)}, then \eqn{K_{\mbox{\scriptsize\rm inhom}}(r) = \pi r^2}{Kinhom(r) = pi * r^2}. Given a point pattern dataset, the inhomogeneous \eqn{K} function can be estimated essentially by summing the values \eqn{1/(\lambda(x_i)\lambda(x_j))}{1/(lambda(x[i]) * lambda(x[j]))} for all pairs of points \eqn{x_i, x_j}{x[i], x[j]} separated by a distance less than \eqn{r}. 
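As a rough illustration of this definition (and not the exact algorithm used by
\code{Kinhom}, which applies the edge corrections and optional renormalisation
described below), the uncorrected sum can be assembled directly from the matrix of
pairwise distances. In this sketch the intensity values \code{lamX} are obtained by a
leave-one-out kernel smoother purely for illustration.
\preformatted{
  ## sketch only: uncorrected estimate of Kinhom at a single distance r0
  X <- rpoispp(function(x,y) { 100 * exp(x) })
  lamX <- density(X, at="points", leaveoneout=TRUE)  # intensity at the data points
  D <- pairdist(X)
  r0 <- 0.1
  close <- (D <= r0) & (D > 0)   # pairs of distinct points closer than r0
  Kraw <- sum(outer(1/lamX, 1/lamX)[close]) / area(Window(X))
}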
This allows us to inspect a point pattern for evidence of interpoint interactions after allowing for spatial inhomogeneity of the pattern. Values \eqn{K_{\mbox{\scriptsize\rm inhom}}(r) > \pi r^2}{Kinhom(r) > pi * r^2} are suggestive of clustering. The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda}. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Edge corrections are used to correct bias in the estimation of \eqn{K_{\mbox{\scriptsize\rm inhom}}}{Kinhom}. Each edge-corrected estimate of \eqn{K_{\mbox{\scriptsize\rm inhom}}(r)}{Kinhom(r)} is of the form \deqn{ \widehat K_{\mbox{\scriptsize\rm inhom}}(r) = (1/A) \sum_i \sum_j \frac{1\{d_{ij} \le r\} e(x_i,x_j,r)}{\lambda(x_i)\lambda(x_j)} }{ K^inhom(r)= (1/A) sum[i] sum[j] 1(d[i,j] <= r) * e(x[i],x[j],r)/(lambda(x[i]) * lambda(x[j])) } where \code{A} is a constant denominator, \eqn{d_{ij}}{d[i,j]} is the distance between points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}, and \eqn{e(x_i,x_j,r)}{e(x[i],x[j],r)} is an edge correction factor.
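Before turning to the edge corrections in detail, the following sketch (using a
hypothetical intensity function \code{lamf}) shows several of the ways of supplying
\code{lambda} described above; the resulting estimates should be broadly similar.
\preformatted{
  lamf <- function(x,y) { 100 * exp(x) }           # hypothetical known intensity
  X <- rpoispp(lamf)
  K1 <- Kinhom(X, lambda=lamf)                     # a function(x,y)
  K2 <- Kinhom(X, lambda=as.im(lamf, Window(X)))   # a pixel image
  K3 <- Kinhom(X, lambda=lamf(X$x, X$y))           # a numeric vector at the data points
  K4 <- Kinhom(X, sigma=0.1)                       # omitted: leave-one-out kernel estimate
}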
For the `border' correction, \deqn{ e(x_i,x_j,r) = \frac{1(b_i > r)}{\sum_j 1(b_j > r)/\lambda(x_j)} }{ 1(b[i] > r)/(sum[j] 1(b[j] > r)/lambda(x[j])) } where \eqn{b_i}{b[i]} is the distance from \eqn{x_i}{x[i]} to the boundary of the window. For the `modified border' correction, \deqn{ e(x_i,x_j,r) = \frac{1(b_i > r)}{\mbox{area}(W \ominus r)} }{ 1(b[i] > r)/area(W [-] r) } where \eqn{W \ominus r}{W [-] r} is the eroded window obtained by trimming a margin of width \eqn{r} from the border of the original window. For the `translation' correction, \deqn{ e(x_i,x_j,r) = \frac 1 {\mbox{area}(W \cap (W + (x_j - x_i)))} }{ 1/area(W intersect (W + x[j]-x[i])) } and for the `isotropic' correction, \deqn{ e(x_i,x_j,r) = \frac 1 {\mbox{area}(W) g(x_i,x_j)} }{ 1/(area(W) g(x[i],x[j])) } where \eqn{g(x_i,x_j)}{g(x[i],x[j])} is the fraction of the circumference of the circle with centre \eqn{x_i}{x[i]} and radius \eqn{||x_i - x_j||}{||x[i]-x[j]||} which lies inside the window. If \code{renormalise=TRUE} (the default), then the estimates described above are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where \eqn{ c = \mbox{area}(W)/\sum (1/\lambda(x_i)). }{ c = area(W)/sum[i] (1/lambda(x[i])). } This rescaling reduces the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. The default value of \code{normpower} is 1 (for consistency with previous versions of \pkg{spatstat}) but the most sensible value is 2, which would correspond to rescaling the \code{lambda} values so that \eqn{ \sum (1/\lambda(x_i)) = \mbox{area}(W). }{ sum[i] (1/lambda(x[i])) = area(W). } If the point pattern \code{X} contains more than about 1000 points, the isotropic and translation edge corrections can be computationally prohibitive. The computations for the border method are much faster, and are statistically efficient when there are large numbers of points. Accordingly, if the number of points in \code{X} exceeds the threshold \code{nlarge}, then only the border correction will be computed. Setting \code{nlarge=Inf} or \code{correction="best"} will prevent this from happening. Setting \code{nlarge=0} is equivalent to selecting only the border correction with \code{correction="border"}. The pair correlation function can also be applied to the result of \code{Kinhom}; see \code{\link{pcf}}. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. 
} \seealso{ \code{\link{Kest}}, \code{\link{pcf}} } \examples{ # inhomogeneous pattern of maples X <- unmark(split(lansing)$maple) \testonly{ sub <- sample(c(TRUE,FALSE), npoints(X), replace=TRUE, prob=c(0.1,0.9)) X <- X[sub] } if(require("spatstat.model")) { # (1) intensity function estimated by model-fitting # Fit spatial trend: polynomial in x and y coordinates fit <- ppm(X, ~ polynom(x,y,2), Poisson()) # (a) predict intensity values at points themselves, # obtaining a vector of lambda values lambda <- predict(fit, locations=X, type="trend") # inhomogeneous K function Ki <- Kinhom(X, lambda) plot(Ki) # (b) predict intensity at all locations, # obtaining a pixel image lambda <- predict(fit, type="trend") Ki <- Kinhom(X, lambda) plot(Ki) } # (2) intensity function estimated by heavy smoothing Ki <- Kinhom(X, sigma=0.1) plot(Ki) # (3) simulated data: known intensity function lamfun <- function(x,y) { 50 + 100 * x } # inhomogeneous Poisson process Y <- rpoispp(lamfun, 150, owin()) # inhomogeneous K function Ki <- Kinhom(Y, lamfun) plot(Ki) # How to make simulation envelopes: # Example shows method (2) if(interactive()) { smo <- density.ppp(X, sigma=0.1) Ken <- envelope(X, Kinhom, nsim=99, simulate=expression(rpoispp(smo)), sigma=0.1, correction="trans") plot(Ken) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/studpermu.test.Rd0000644000176200001440000001014514611073325017645 0ustar liggesusers\name{studpermu.test} \alias{studpermu.test} \title{ Studentised Permutation Test } \description{ Perform a studentised permutation test for a difference between groups of point patterns. } \usage{ studpermu.test(X, formula, summaryfunction = Kest, \dots, rinterval = NULL, nperm = 999, use.Tbar = FALSE, minpoints = 20, rsteps = 128, r = NULL, arguments.in.data = FALSE) } \arguments{ \item{X}{ Data. Either a \code{hyperframe} or a list of lists of point patterns. } \item{formula}{ Formula describing the grouping, when \code{X} is a hyperframe. The left side of the formula identifies which column of \code{X} contains the point patterns. The right side identifies the grouping factor. If the formula is missing, the grouping variable is taken to be the first column of \code{X} that contains a factor, and the point patterns are taken from the first column that contains point patterns. } \item{summaryfunction}{ Summary function applicable to point patterns. } \item{\dots}{ Additional arguments passed to \code{summaryfunction}. } \item{rinterval}{ Interval of distance values \eqn{r} over which the summary function should be evaluated and over which the test statistic will be integrated. If \code{NULL}, the default range of the summary statistic is used (taking the intersection of these ranges over all patterns). } \item{nperm}{ Number of random permutations for the test. } \item{use.Tbar}{ Logical value indicating choice of test statistic. If \code{TRUE}, use the alternative test statistic, which is appropriate for summary functions with roughly constant variance, such as \eqn{K(r)/r} or \eqn{L(r)}. } \item{minpoints}{ Minimum permissible number of points in a point pattern for inclusion in the test calculation. } \item{rsteps}{ Number of discretisation steps in the \code{rinterval}. } \item{r}{ Optional vector of distance values as the argument for \code{summaryfunction}. Should not usually be given. There is a sensible default. } \item{arguments.in.data}{ Logical. 
If \code{TRUE}, individual extra arguments to \code{summaryfunction} will be taken from \code{X} (which must be a hyperframe). This assumes that the first argument of \code{summaryfunction} is the point pattern dataset. } } \details{ This function performs the studentized permutation test of Hahn (2012) for a difference between groups of point patterns. The first argument \code{X} should be either \describe{ \item{a list of lists of point patterns.}{ Each element of \code{X} will be interpreted as a group of point patterns, assumed to be replicates of the same point process. } \item{a hyperframe:}{ One column of the hyperframe should contain point patterns, and another column should contain a factor indicating the grouping. The argument \code{formula} should be a formula in the \R language specifying the grouping: it should be of the form \code{P ~ G} where \code{P} is the name of the column of point patterns, and \code{G} is the name of the factor. } } A group needs to contain at least two point patterns with at least \code{minpoints} points in each pattern. The function returns an object of class \code{"htest"} and \code{"studpermutest"} that can be printed and plotted. The printout shows the test result and \eqn{p}-value. The plot shows the summary functions for the groups (and the group means if requested). } \value{ Object of class \code{"studpermutest"}. } \references{ Hahn, U. (2012) A studentized permutation test for the comparison of spatial point patterns. \emph{Journal of the American Statistical Association} \bold{107} (498), 754--764. } \seealso{ \code{\link{plot.studpermutest}} } \author{ Ute Hahn. Modified for \code{spatstat} by \spatstatAuthors. } \examples{ np <- if(interactive()) 99 else 19 testpyramidal <- studpermu.test(pyramidal, Neurons ~ group, nperm=np) testpyramidal } \keyword{spatial} \keyword{htest} \concept{Goodness-of-fit} spatstat.explore/man/plot.fasp.Rd0000644000176200001440000001205014611073324016541 0ustar liggesusers\name{plot.fasp} \alias{plot.fasp} \title{Plot a Function Array} \description{ Plots an array of summary functions, usually associated with a point pattern, stored in an object of class \code{"fasp"}. A method for \code{plot}. } \usage{ \method{plot}{fasp}(x,formule=NULL, \dots, subset=NULL, title=NULL, banner=TRUE, transpose=FALSE, samex=FALSE, samey=FALSE, mar.panel=NULL, outerlabels=TRUE, cex.outerlabels=1.25, legend=FALSE) } \arguments{ \item{x}{An object of class \code{"fasp"} representing a function array. } \item{formule}{ A formula or list of formulae indicating what variables are to be plotted against what variable. Each formula is either an R language formula object, or a string that can be parsed as a formula. If \code{formule} is a list, its \eqn{k^{th}}{k-th} component should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th} plot where \code{x$which[i,j]=k}. If the formula is left as \code{NULL}, then \code{plot.fasp} attempts to use the component \code{default.formula} of \code{x}. If that component is NULL as well, it gives up. } \item{\dots}{ Arguments passed to \code{\link{plot.fv}} to control the individual plot panels. } \item{subset}{ A logical vector, or a vector of indices, or an expression or a character string, or a \bold{list} of such, indicating a subset of the data to be included in each plot. If \code{subset} is a list, its \eqn{k^{th}}{k-th} component should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th} plot where \code{x$which[i,j]=k}. } \item{title}{ Overall title for the plot. } \item{banner}{ Logical. 
If \code{TRUE}, the overall title is plotted. If \code{FALSE}, the overall title is not plotted and no space is allocated for it. } \item{transpose}{ Logical. If \code{TRUE}, rows and columns will be exchanged. } \item{samex,samey}{ Logical values indicating whether all individual plot panels should have the same x axis limits and the same y axis limits, respectively. This makes it easier to compare the plots. } \item{mar.panel}{ Vector of length 4 giving the value of the graphics parameter \code{mar} controlling the size of plot margins for each individual plot panel. See \code{\link{par}}. } \item{outerlabels}{Logical. If \code{TRUE}, the row and column names of the array of functions are plotted in the margins of the array of plot panels. If \code{FALSE}, each individual plot panel is labelled by its row and column name. } \item{cex.outerlabels}{ Character expansion factor for row and column labels of array. } \item{legend}{ Logical flag determining whether to plot a legend in each panel. } } \details{ An object of class \code{"fasp"} represents an array of summary functions, usually associated with a point pattern. See \code{\link{fasp.object}} for details. Such an object is created, for example, by \code{\link[spatstat.explore]{alltypes}}. The function \code{plot.fasp} is a method for \code{plot}. It calls \code{\link{plot.fv}} to plot the individual panels. For information about the interpretation of the arguments \code{formule} and \code{subset}, see \code{\link{plot.fv}}. Arguments that are often passed through \code{...} include \code{col} to control the colours of the different lines in a panel, and \code{lty} and \code{lwd} to control the line type and line width of the different lines in a panel. The argument \code{shade} can also be used to display confidence intervals or significance bands as filled grey shading. See \code{\link{plot.fv}}. The argument \code{title}, if present, will determine the overall title of the plot. If it is absent, it defaults to \code{x$title}. Titles for the individual plot panels will be taken from \code{x$titles}. } \value{None.} \section{Warnings}{ (Each component of) the \code{subset} argument may be a logical vector (of the same length as the vectors of data which are extracted from \code{x}), or a vector of indices, or an \bold{expression} such as \code{expression(r<=0.2)}, or a text string, such as \code{"r<=0.2"}. Attempting a syntax such as \code{subset = r<=0.2} (without wrapping \code{r<=0.2} either in quote marks or in \code{expression()}) will cause this function to fall over. Variables referred to in any formula must exist in the data frames stored in \code{x}. What the names of these variables are will of course depend upon the nature of \code{x}. } \seealso{ \code{\link[spatstat.explore]{alltypes}}, \code{\link{plot.fv}}, \code{\link{fasp.object}} } \examples{ if(interactive()) { X.G <- alltypes(amacrine,"G") plot(X.G) plot(X.G,subset="r<=0.2") plot(X.G,formule=asin(sqrt(cbind(km,theo))) ~ asin(sqrt(theo))) plot(X.G,fo=cbind(km,theo) - theo~r, subset="theo<=0.9") } } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat.explore/man/rectcontact.Rd0000644000176200001440000000306114643125462017154 0ustar liggesusers\name{rectcontact} \alias{rectcontact} \title{ Contact Distribution Function using Rectangular Structuring Element } \description{ Computes an estimate of the contact distribution function of a set, using a rectangular structuring element. 
} \usage{ rectcontact(X, \dots, asp = 1, npasses=4, eps = NULL, r = NULL, breaks = NULL, correction = c("rs", "km")) } \arguments{ \item{X}{ Logical-valued image. The \code{TRUE} values in the image determine the spatial region whose contact distribution function should be estimated. } \item{\dots}{ Ignored. } \item{asp}{ Aspect ratio for the rectangular metric. A single positive number. See \code{\link[spatstat.geom]{rectdistmap}} for explanation. } \item{npasses}{ Number of passes to perform in the distance algorithm. A positive integer. See \code{\link[spatstat.geom]{rectdistmap}} for explanation. } \item{eps}{ Pixel size, if the image should be converted to a finer grid. } \item{r}{ Optional vector of distance values. Do Not Use This. } \item{breaks}{ Do Not Use This. } \item{correction}{ Character vector specifying the edge correction. } } \details{ To be written. } \value{ Object of class \code{"fv"}. } \author{ \adrian. } \seealso{ \code{\link{Hest}} } \examples{ ## make an image which is TRUE/FALSE inside/outside the letter R V <- letterR Frame(V) <- grow.rectangle(Frame(V), 0.5) Z <- as.im(V, value=TRUE, na.replace=FALSE) ## analyse plot(rectcontact(Z)) } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/marktable.Rd0000644000176200001440000000566014643125461016613 0ustar liggesusers\name{marktable} \alias{marktable} \title{Tabulate Marks in Neighbourhood of Every Point in a Point Pattern} \description{ Visit each point in a multitype point pattern, find the neighbouring points, and compile a frequency table of the marks of these neighbour points. } \usage{ marktable(X, R, N, exclude=TRUE, collapse=FALSE) } \arguments{ \item{X}{ A multitype point pattern. An object of class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}, with marks which are a factor. } \item{R}{ Neighbourhood radius. Incompatible with \code{N}. } \item{N}{ Number of neighbours of each point. Incompatible with \code{R}. } \item{exclude}{ Logical. If \code{exclude=TRUE}, the neighbours of a point do not include the point itself. If \code{exclude=FALSE}, a point belongs to its own neighbourhood. } \item{collapse}{ Logical. If \code{collapse=FALSE} (the default) the results for each point are returned as separate rows of a table. If \code{collapse=TRUE}, the results are aggregated according to the type of point. } } \value{ A contingency table (object of class \code{"table"}). If \code{collapse=FALSE}, the table has one row for each point in \code{X}, and one column for each possible mark value. If \code{collapse=TRUE}, the table has one row and one column for each possible mark value. } \details{ This algorithm visits each point in the point pattern \code{X}, inspects all the neighbouring points within a radius \code{R} of the current point (or the \code{N} nearest neighbours of the current point), and compiles a frequency table of the marks attached to the neighbours. The dataset \code{X} must be a multitype point pattern, that is, \code{marks(X)} must be a \code{factor}. If \code{collapse=FALSE} (the default), the result is a two-dimensional contingency table with one row for each point in the pattern, and one column for each possible mark value. The \code{[i,j]} entry in the table gives the number of neighbours of point \code{i} that have mark \code{j}. If \code{collapse=TRUE}, this contingency table is aggregated according to the type of point, so that the result is a contingency table with one row and one column for each possible mark value. 
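For example, the aggregated table should agree with summing the rows of the
point-by-point table according to the mark of each point, as in the following sketch
(an illustrative check, not part of the algorithm):
\preformatted{
  tab <- marktable(amacrine, R=0.1)                 # one row per data point
  agg <- marktable(amacrine, R=0.1, collapse=TRUE)  # one row per mark value
  ## 'agg' should match the row sums of 'tab' grouped by the mark of each point
  rowsum(unclass(tab), group=marks(amacrine))
}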
The \code{[i,j]} entry in the table gives the number of neighbours of a point with mark \code{i} that have mark \code{j}. To perform more complicated calculations on the neighbours of every point, use \code{\link[spatstat.geom]{markstat}} or \code{\link[spatstat.geom]{applynbd}}. } \seealso{ \code{\link[spatstat.geom]{markstat}}, \code{\link[spatstat.geom]{applynbd}}, \code{\link{Kcross}}, \code{\link[spatstat.geom]{ppp.object}}, \code{\link{table}} } \examples{ head(marktable(amacrine, 0.1)) head(marktable(amacrine, 0.1, exclude=FALSE)) marktable(amacrine, N=1, collapse=TRUE) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{programming} spatstat.explore/man/compileK.Rd0000644000176200001440000001115514611073323016402 0ustar liggesusers\name{compileK} \alias{compileK} \alias{compilepcf} \title{ Generic Calculation of K Function and Pair Correlation Function } \description{ Low-level functions which calculate the estimated \eqn{K} function and estimated pair correlation function (or any similar functions) from a matrix of pairwise distances and optional weights. } \usage{ compileK(D, r, weights = NULL, denom = 1, check = TRUE, ratio = FALSE, fname = "K", samplesize=denom) compilepcf(D, r, weights = NULL, denom = 1, check = TRUE, endcorrect = TRUE, ratio=FALSE, \dots, fname = "g", samplesize=denom) } \arguments{ \item{D}{ A square matrix giving the distances between all pairs of points. } \item{r}{ An equally spaced, finely spaced sequence of distance values. } \item{weights}{ Optional numerical weights for the pairwise distances. A numeric matrix with the same dimensions as \code{D}. If absent, the weights are taken to equal 1. } \item{denom}{ Denominator for the estimator. A single number, or a numeric vector with the same length as \code{r}. See Details. } \item{check}{ Logical value specifying whether to check that \code{D} is a valid matrix of pairwise distances. } \item{ratio}{ Logical value indicating whether to store ratio information. See Details. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{density.default}} controlling the kernel smoothing. } \item{endcorrect}{ Logical value indicating whether to apply End Correction of the pair correlation estimate at \code{r=0}. } \item{fname}{ Character string giving the name of the function being estimated. } \item{samplesize}{ The sample size that should be used as the denominator when \code{ratio=TRUE}. } } \details{ These low-level functions construct estimates of the \eqn{K} function or pair correlation function, or any similar functions, given only the matrix of pairwise distances and optional weights associated with these distances. These functions are useful for code development and for teaching, because they perform a common task, and do the housekeeping required to make an object of class \code{"fv"} that represents the estimated function. However, they are not very efficient. \code{compileK} calculates the weighted estimate of the \eqn{K} function, \deqn{ \hat K(r) = (1/v(r)) \sum_i \sum_j 1\{ d_{ij} \le r\} w_{ij} }{ K(r) = (1/v(r)) \sum[i] \sum[j] 1(d[i,j] \le r) w[i,j] } and \code{compilepcf} calculates the weighted estimate of the pair correlation function, \deqn{ \hat g(r) = (1/v(r)) \sum_i \sum_j \kappa( d_{ij} - r ) w_{ij} }{ g(r) = (1/v(r)) \sum[i] \sum[j] \kappa ( d[i,j] - r) w[i,j] } where \eqn{d_{ij}}{d[i,j]} is the distance between spatial points \eqn{i} and \eqn{j}, with corresponding weight \eqn{w_{ij}}{w[i,j]}, and \eqn{v(r)} is a specified denominator. 
Here \eqn{\kappa}{\kappa} is a fixed-bandwidth smoothing kernel. For a point pattern in two dimensions, the usual denominator \eqn{v(r)} is constant for the \eqn{K} function, and proportional to \eqn{r} for the pair correlation function. See the Examples. The result is an object of class \code{"fv"} representing the estimated function. This object has only one column of function values. Additional columns (such as a column giving the theoretical value) must be added by the user, with the aid of \code{\link{bind.fv}}. If \code{ratio=TRUE}, the result also belongs to class \code{"rat"} and has attributes containing the numerator and denominator of the function estimate. (If \code{samplesize} is given, the numerator and denominator are rescaled by a common factor so that the denominator is equal to \code{samplesize}.) This allows function estimates from several datasets to be pooled using \code{\link{pool}}. } \value{ An object of class \code{"fv"} representing the estimated function. } \author{ \adrian } \seealso{ \code{\link{Kest}}, \code{\link{pcf}} for definitions of the \eqn{K} function and pair correlation function. \code{\link{bind.fv}} to add more columns. \code{\link{compileCDF}} for the corresponding low-level utility for estimating a cumulative distribution function. } \examples{ ## Equivalent to Kest(japanesepines) and pcf(japanesepines) X <- japanesepines D <- pairdist(X) Wt <- edge.Ripley(X, D) lambda <- intensity(X) a <- (npoints(X)-1) * lambda r <- seq(0, 0.25, by=0.01) K <- compileK(D=D, r=r, weights=Wt, denom=a) g <- compilepcf(D=D, r=r, weights=Wt, denom= a * 2 * pi * r) } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/pcf.fasp.Rd0000644000176200001440000001110014611073324016326 0ustar liggesusers\name{pcf.fasp} \alias{pcf.fasp} \title{Pair Correlation Function obtained from array of K functions} \description{ Estimates the (bivariate) pair correlation functions of a point pattern, given an array of (bivariate) K functions. } \usage{ \method{pcf}{fasp}(X, \dots, method="c") } \arguments{ \item{X}{ An array of multitype \eqn{K} functions (object of class \code{"fasp"}). } \item{\dots}{ Arguments controlling the smoothing spline function \code{smooth.spline}. } \item{method}{ Letter \code{"a"}, \code{"b"}, \code{"c"} or \code{"d"} indicating the method for deriving the pair correlation function from the \code{K} function. } } \value{ A function array (object of class \code{"fasp"}, see \code{\link{fasp.object}}) representing an array of pair correlation functions. This can be thought of as a matrix \code{Y} each of whose entries \code{Y[i,j]} is a function value table (class \code{"fv"}) representing the pair correlation function between points of type \code{i} and points of type \code{j}. } \details{ The pair correlation function of a stationary point process is \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. We also apply the same definition to other variants of the classical \eqn{K} function, such as the multitype \eqn{K} functions (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}). 
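As a crude numerical illustration of this relationship (and not the smoothing-spline
methods actually used here, which are described below), an estimate of \eqn{g(r)} can
be obtained by differencing an estimate of \eqn{K(r)} directly:
\preformatted{
  K <- Kest(redwood)
  r <- K$r
  ## forward finite difference: g(r) is approximately K'(r) / (2 * pi * r)
  g.fd <- diff(K$iso) / diff(r) / (2 * pi * r[-1])
  plot(r[-1], g.fd, type="l")
}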
For all these variants, the benchmark value of \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to \eqn{g(r) = 1}. This routine computes an estimate of \eqn{g(r)} from an array of estimates of \eqn{K(r)} or its variants, using smoothing splines to approximate the derivatives. It is a method for the generic function \code{\link{pcf}}. The argument \code{X} should be a function array (object of class \code{"fasp"}, see \code{\link{fasp.object}}) containing several estimates of \eqn{K} functions. This should have been obtained from \code{\link{alltypes}} with the argument \code{fun="K"}. The smoothing spline operations are performed by \code{\link{smooth.spline}} and \code{\link{predict.smooth.spline}} from the \code{modreg} library. Four numerical methods are available: \itemize{ \item \bold{"a"} apply smoothing to \eqn{K(r)}, estimate its derivative, and plug in to the formula above; \item \bold{"b"} apply smoothing to \eqn{Y(r) = \frac{K(r)}{2 \pi r}}{Y(r) = K(r)/(2 * pi * r)} constraining \eqn{Y(0) = 0}, estimate the derivative of \eqn{Y}, and solve; \item \bold{"c"} apply smoothing to \eqn{Z(r) = \frac{K(r)}{\pi r^2}}{Y(r) = K(r)/(pi * r^2)} constraining \eqn{Z(0)=1}, estimate its derivative, and solve. \item \bold{"d"} apply smoothing to \eqn{V(r) = \sqrt{K(r)}}{V(r) = sqrt(K(r))}, estimate its derivative, and solve. } Method \code{"c"} seems to be the best at suppressing variability for small values of \eqn{r}. However it effectively constrains \eqn{g(0) = 1}. If the point pattern seems to have inhibition at small distances, you may wish to experiment with method \code{"b"} which effectively constrains \eqn{g(0)=0}. Method \code{"a"} seems comparatively unreliable. Useful arguments to control the splines include the smoothing tradeoff parameter \code{spar} and the degrees of freedom \code{df}. See \code{\link{smooth.spline}} for details. } \references{ Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kmulti}}, \code{\link{alltypes}}, \code{\link{smooth.spline}}, \code{\link{predict.smooth.spline}} } \examples{ # multitype point pattern KK <- alltypes(amacrine, "K") p <- pcf.fasp(KK, spar=0.5, method="b") plot(p) # strong inhibition between points of the same type } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/pcfcross.inhom.Rd0000644000176200001440000001254014611073324017572 0ustar liggesusers\name{pcfcross.inhom} \alias{pcfcross.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Cross-Type) } \description{ Estimates the inhomogeneous cross-type pair correlation function for a multitype point pattern. } \usage{ pcfcross.inhom(X, i, j, lambdaI = NULL, lambdaJ = NULL, ..., r = NULL, breaks = NULL, kernel="epanechnikov", bw=NULL, adjust.bw = 1, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma = NULL, adjust.sigma = 1, varcov = NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. 
A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity function of the points of type \code{i}. Either a vector giving the intensity values at the points of type \code{i}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. Values of the estimated intensity function of the points of type \code{j}. A numeric vector, pixel image or \code{function(x,y)}. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g_{ij}(r)}{g[i,j](r)} should be evaluated. There is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{kernel}{ Choice of one-dimensional smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for one-dimensional smoothing kernel, passed to \code{\link{density.default}}. } \item{adjust.bw}{ Numeric value. \code{bw} will be multiplied by this value. } \item{\dots}{ Other arguments passed to the one-dimensional kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Bandwidth coefficient; see Details. } \item{correction}{ Choice of edge correction. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambdaI} or \code{lambdaJ} is estimated by spatial kernel smoothing. } \item{adjust.sigma}{ Numeric value. \code{sigma} will be multiplied by this value. } } \details{ The inhomogeneous cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} is a summary of the dependence between two types of points in a multitype spatial point process that does not have a uniform density of points. The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding two points, of types \eqn{i} and \eqn{j} respectively, at locations \eqn{x} and \eqn{y} separated by a distance \eqn{r} is equal to \deqn{ p(r) = \lambda_i(x) \lambda_j(y) g(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i](x) * lambda[j](y) * g(r) dx dy } where \eqn{\lambda_i}{lambda[i]} is the intensity function of the process of points of type \eqn{i}. For a multitype Poisson point process, this probability is \eqn{p(r) = \lambda_i(x) \lambda_j(y)}{p(r) = lambda[i](x) * lambda[j](y)} so \eqn{g_{ij}(r) = 1}{g[i,j](r) = 1}. The command \code{pcfcross.inhom} estimates the inhomogeneous pair correlation using a modified version of the algorithm in \code{\link{pcf.ppp}}. The arguments \code{bw} and \code{adjust.bw} control the degree of one-dimensional smoothing of the estimate of pair correlation. If the arguments \code{lambdaI} and/or \code{lambdaJ} are missing or null, they will be estimated from \code{X} by spatial kernel smoothing using a leave-one-out estimator, computed by \code{\link{density.ppp}}. The arguments \code{sigma}, \code{varcov} and \code{adjust.sigma} control the degree of spatial smoothing. } \value{ A function value table (object of class \code{"fv"}).
Essentially a data frame containing the variables \item{r}{ the vector of values of the argument \eqn{r} at which the inhomogeneous cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g_{ij}(r)}{g[i,j](r)} for the Poisson process } \item{trans}{vector of values of \eqn{g_{ij}(r)}{g[i,j](r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g_{ij}(r)}{g[i,j](r)} estimated by Ripley isotropic correction } as required. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcfinhom}}, \code{\link{pcfcross}}, \code{\link{pcfdot.inhom}} } \examples{ plot(pcfcross.inhom(amacrine, "on", "off", stoyan=0.1), legendpos="bottom") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/bw.stoyan.Rd0000644000176200001440000000355614611073323016571 0ustar liggesusers\name{bw.stoyan} \alias{bw.stoyan} \title{ Stoyan's Rule of Thumb for Bandwidth Selection } \description{ Computes a rough estimate of the appropriate bandwidth for kernel smoothing estimators of the pair correlation function and other quantities. } \usage{ bw.stoyan(X, co=0.15) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{co}{ Coefficient appearing in the rule of thumb. See Details. } } \details{ Estimation of the pair correlation function and other quantities by smoothing methods requires a choice of the smoothing bandwidth. Stoyan and Stoyan (1995, equation (15.16), page 285) proposed a rule of thumb for choosing the smoothing bandwidth. For the Epanechnikov kernel, the rule of thumb is to set the kernel's half-width \eqn{h} to \eqn{0.15/\sqrt{\lambda}}{0.15/sqrt(\lambda)} where \eqn{\lambda}{\lambda} is the estimated intensity of the point pattern, typically computed as the number of points of \code{X} divided by the area of the window containing \code{X}. For a general kernel, the corresponding rule is to set the standard deviation of the kernel to \eqn{\sigma = 0.15/\sqrt{5\lambda}}{\sigma = 0.15/sqrt(5 * \lambda)}. The coefficient \eqn{0.15} can be tweaked using the argument \code{co}. To ensure the bandwidth is finite, an empty point pattern is treated as if it contained 1 point. } \value{ A finite positive numerical value giving the selected bandwidth (the standard deviation of the smoothing kernel). } \seealso{ \code{\link{pcf}}, \code{\link{bw.relrisk}} } \examples{ bw.stoyan(shapley) } \references{ Stoyan, D. and Stoyan, H. (1995) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} \concept{Bandwidth selection} spatstat.explore/man/allstats.Rd0000644000176200001440000000604714643125461016500 0ustar liggesusers\name{allstats} \alias{allstats} \title{Calculate four standard summary functions of a point pattern.} \description{ Calculates the \eqn{F}, \eqn{G}, \eqn{J}, and \eqn{K} summary functions for an unmarked point pattern. Returns them as a function array (of class \code{"fasp"}, see \code{\link{fasp.object}}). } \usage{ allstats(pp, \dots, dataname=NULL, verb=FALSE) } \arguments{ \item{pp}{The observed point pattern, for which summary function estimates are required. An object of class \code{"ppp"}. It must not be marked. } \item{\dots}{ Optional arguments passed to the summary functions \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} and \code{\link{Kest}}. 
} \item{dataname}{A character string giving an optional (alternative) name for the point pattern. } \item{verb}{A logical value meaning ``verbose''. If \code{TRUE}, progress reports are printed during calculation. } } \details{ This computes four standard summary statistics for a point pattern: the empty space function \eqn{F(r)}, nearest neighbour distance distribution function \eqn{G(r)}, van Lieshout-Baddeley function \eqn{J(r)} and Ripley's function \eqn{K(r)}. The real work is done by \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} and \code{\link{Kest}} respectively. Consult the help files for these functions for further information about the statistical interpretation of \eqn{F}, \eqn{G}, \eqn{J} and \eqn{K}. If \code{verb} is \code{TRUE}, then ``progress reports'' (just indications of completion) are printed out when the calculations are finished for each of the four function types. The overall title of the array of four functions (for plotting by \code{\link{plot.fasp}}) will be formed from the argument \code{dataname}. If this is not given, it defaults to the expression for \code{pp} given in the call to \code{allstats}. } \value{ A list of length 4 containing the \eqn{F}, \eqn{G}, \eqn{J} and \eqn{K} functions respectively. The list can be plotted directly using \code{plot} (which dispatches to \code{\link[spatstat.geom]{plot.anylist}}). Each list entry retains the format of the output of the relevant estimating routine \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} or \code{\link{Kest}}. Thus each entry in the list is a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). The default formulae for plotting these functions are \code{cbind(km,theo) ~ r} for F, G, and J, and \code{cbind(trans,theo) ~ r} for K. } \author{\adrian and \rolf } \seealso{ \code{\link[spatstat.geom]{plot.anylist}}, \code{\link{plot.fv}}, \code{\link{fv.object}}, \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}} } \examples{ a <- allstats(swedishpines,dataname="Swedish Pines") if(interactive()) { plot(a) plot(a, subset=list("r<=15","r<=15","r<=15","r<=50")) } } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/plot.cdftest.Rd0000644000176200001440000000661314611073324017254 0ustar liggesusers\name{plot.cdftest} \alias{plot.cdftest} \title{Plot a Spatial Distribution Test} \description{ Plot the result of a spatial distribution test computed by \code{cdf.test}. } \usage{ \method{plot}{cdftest}(x, ..., style=c("cdf", "PP", "QQ"), lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=2, lty0=2, do.legend) } \arguments{ \item{x}{ Object to be plotted. An object of class \code{"cdftest"} produced by a method for \code{\link{cdf.test}}. } \item{\dots}{ extra arguments that will be passed to the plotting function \code{\link{plot.default}}. } \item{style}{ Style of plot. See Details. } \item{col,lwd,lty}{ The width, colour and type of lines used to plot the empirical curve (the empirical distribution, or PP plot or QQ plot). } \item{col0,lwd0,lty0}{ The width, colour and type of lines used to plot the reference curve (the predicted distribution, or the diagonal). } \item{do.legend}{ Logical value indicating whether to add an explanatory legend. Applies only when \code{style="cdf"}. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for the class \code{"cdftest"}. 
An object of this class represents the outcome of a spatial distribution test, computed by \code{\link{cdf.test}}, and based on either the Kolmogorov-Smirnov, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises or Anderson-Darling test. If \code{style="cdf"} (the default), the plot displays the two cumulative distribution functions that are compared by the test: namely the empirical cumulative distribution function of the covariate at the data points, and the predicted cumulative distribution function of the covariate under the model, both plotted against the value of the covariate. The Kolmogorov-Smirnov test statistic (for example) is the maximum vertical separation between the two curves. If \code{style="PP"} then the P-P plot is drawn. The \eqn{x} coordinates of the plot are cumulative probabilities for the covariate under the model. The \eqn{y} coordinates are cumulative probabilities for the covariate at the data points. The diagonal line \eqn{y=x} is also drawn for reference. The Kolmogorov-Smirnov test statistic is the maximum vertical separation between the P-P plot and the diagonal reference line. If \code{style="QQ"} then the Q-Q plot is drawn. The \eqn{x} coordinates of the plot are quantiles of the covariate under the model. The \eqn{y} coordinates are quantiles of the covariate at the data points. The diagonal line \eqn{y=x} is also drawn for reference. The Kolmogorov-Smirnov test statistic cannot be read off the Q-Q plot. } \seealso{ \code{\link{cdf.test}} } \examples{ op <- options(useFancyQuotes=FALSE) plot(cdf.test(cells, "x")) if(require("spatstat.model")) { # synthetic data: nonuniform Poisson process X <- rpoispp(function(x,y) { 100 * exp(x) }, win=square(1)) # fit uniform Poisson process fit0 <- ppm(X ~1) # test covariate = x coordinate xcoord <- function(x,y) { x } # test wrong model k <- cdf.test(fit0, xcoord) # plot result of test plot(k, lwd0=3) plot(k, style="PP") plot(k, style="QQ") } options(op) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{hplot} \concept{Goodness-of-fit} spatstat.explore/man/distcdf.Rd0000644000176200001440000001052514643125461016265 0ustar liggesusers\name{distcdf} \alias{distcdf} \title{Distribution Function of Interpoint Distance } \description{ Computes the cumulative distribution function of the distance between two independent random points in a given window or windows. } \usage{ distcdf(W, V=W, \dots, dW=1, dV=dW, nr=1024, regularise=TRUE, savedenom=FALSE, delta=NULL) } \arguments{ \item{W}{ A window (object of class \code{"owin"}) containing the first random point. } \item{V}{ Optional. Another window containing the second random point. Defaults to \code{W}. } \item{\dots}{ Arguments passed to \code{\link[spatstat.geom]{as.mask}} to determine the pixel resolution for the calculation. } \item{dV, dW}{ Optional. Probability densities (not necessarily normalised) for the first and second random points respectively. Data in any format acceptable to \code{\link[spatstat.geom]{as.im}}, for example, a \code{function(x,y)} or a pixel image or a numeric value. The default corresponds to a uniform distribution over the window. } \item{nr}{ Integer. The number of values of interpoint distance \eqn{r} for which the CDF will be computed. Should be a large value. Alternatively if \code{nr=NULL}, a good default value will be chosen, depending on the pixel resolution. } \item{regularise}{ Logical value indicating whether to smooth the results for very small distances, to avoid discretisation artefacts. 
} \item{savedenom}{ Logical value indicating whether to save the denominator of the double integral as an attribute of the result. } \item{delta}{ Optional. A positive number. The maximum permitted spacing between values of the function argument. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \details{ This command computes the Cumulative Distribution Function \eqn{ CDF(r) = Prob(T \le r) }{ CDF(r) = Prob(T \le r) } of the Euclidean distance \eqn{T = \|X_1 - X_2\|}{T = |X1-X2|} between two independent random points \eqn{X_1}{X1} and \eqn{X_2}{X2}. In the simplest case, the command \code{distcdf(W)}, the random points are assumed to be uniformly distributed in the same window \code{W}. Alternatively the two random points may be uniformly distributed in two different windows \code{W} and \code{V}. In the most general case the first point \eqn{X_1}{X1} is random in the window \code{W} with a probability density proportional to \code{dW}, and the second point \eqn{X_2}{X2} is random in a different window \code{V} with probability density proportional to \code{dV}. The values of \code{dW} and \code{dV} must be finite and nonnegative. The calculation is performed by numerical integration of the set covariance function \code{\link[spatstat.geom]{setcov}} for uniformly distributed points, and by computing the covariance function \code{\link[spatstat.geom]{imcov}} in the general case. The accuracy of the result depends on the pixel resolution used to represent the windows: this is controlled by the arguments \code{\dots} which are passed to \code{\link[spatstat.geom]{as.mask}}. For example use \code{eps=0.1} to specify pixels of size 0.1 units. The arguments \code{W} or \code{V} may also be point patterns (objects of class \code{"ppp"}). The result is the cumulative distribution function of the distance from a randomly selected point in the point pattern, to a randomly selected point in the other point pattern or window. If \code{regularise=TRUE} (the default), values of the cumulative distribution function for very short distances are smoothed to avoid discretisation artefacts. Smoothing is applied to all distances shorter than the width of 10 pixels. Numerical accuracy of some calculations requires very fine spacing of the values of the function argument \code{r}. If the argument \code{delta} is given, then after the cumulative distribution function has been calculated, it will be interpolated onto a finer grid of \code{r} values with spacing less than or equal to \code{delta}. } \seealso{ \code{\link[spatstat.geom]{setcov}}, \code{\link[spatstat.geom]{as.mask}}. } \examples{ # The unit disc B <- disc() plot(distcdf(B)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat.explore/man/bw.ppl.Rd0000644000176200001440000001071114666524717016060 0ustar liggesusers\name{bw.ppl} \alias{bw.ppl} \title{ Likelihood Cross Validation Bandwidth Selection for Kernel Density } \description{ Uses likelihood cross-validation to select a smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.ppl(X, \dots, srange=NULL, ns=16, sigma=NULL, varcov1=NULL, weights=NULL, shortcut=TRUE, warn=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{srange}{ Optional numeric vector of length 2 giving the range of values of bandwidth to be searched. } \item{ns}{ Optional integer giving the number of values of bandwidth to search. } \item{sigma}{ Optional. 
Vector of values of the bandwidth to be searched. Overrides the values of \code{ns} and \code{srange}. } \item{varcov1}{ Optional. Variance-covariance matrix matrix of the kernel with bandwidth \eqn{h=1}. See section on Anisotropic Smoothing. } \item{weights}{ Optional. Numeric vector of weights for the points of \code{X}. Argument passed to \code{\link{density.ppp}}. } \item{\dots}{ Additional arguments passed to \code{\link{density.ppp}}. } \item{shortcut}{ Logical value indicating whether to speed up the calculation by omitting the integral term in the cross-validation criterion. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the maximum of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. The bandwidth \eqn{\sigma}{\sigma} is chosen to maximise the point process likelihood cross-validation criterion \deqn{ \mbox{LCV}(\sigma) = \sum_i \log\hat\lambda_{-i}(x_i) - \int_W \hat\lambda(u) \, {\rm d}u }{ LCV(\sigma) = sum[i] log(\lambda[-i](x[i])) - integral[W] \lambda(u) du } where the sum is taken over all the data points \eqn{x_i}{x[i]}, where \eqn{\hat\lambda_{-i}(x_i)}{\lambda[-i](x_i)} is the leave-one-out kernel-smoothing estimate of the intensity at \eqn{x_i}{x[i]} with smoothing bandwidth \eqn{\sigma}{\sigma}, and \eqn{\hat\lambda(u)}{\lambda(u)} is the kernel-smoothing estimate of the intensity at a spatial location \eqn{u} with smoothing bandwidth \eqn{\sigma}{\sigma}. See Loader(1999, Section 5.3). The value of \eqn{\mbox{LCV}(\sigma)}{LCV(\sigma)} is computed directly, using \code{\link{density.ppp}}, for \code{ns} different values of \eqn{\sigma}{\sigma} between \code{srange[1]} and \code{srange[2]}. The result is a numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted to show the (rescaled) mean-square error as a function of \code{sigma}. If \code{shortcut=TRUE} (the default), the computation is accelerated by omitting the integral term in the equation above. This is valid because the integral is approximately constant. } \section{Anisotropic Smoothing}{ Anisotropic kernel smoothing is available in \code{\link{density.ppp}} using the argument \code{varcov} to specify the variance-covariance matrix of the anisotropic kernel. In order to choose the matrix \code{varcov}, the user can call \code{bw.ppl} using the argument \code{varcov1} to specify a \sQuote{template} matrix. Scalar multiples of \code{varcov1} will be considered and the optimal scale factor will be determined. That is, \code{bw.ppl} will try smoothing the data using \code{varcov = h^2 * varcov1} for different values of \code{h}. The result of \code{bw.ppl} will be the optimal value of \code{h}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.scott}}, \code{\link{bw.CvL}}, \code{\link{bw.frac}}. } \examples{ if(interactive()) { b <- bw.ppl(redwood) plot(b, main="Likelihood cross validation for redwoods") plot(density(redwood, b)) } \testonly{ b1 <- bw.ppl(redwood, srange=c(0.03, 0.07), ns=2) b2 <- bw.ppl(redwood, srange=c(0.03, 0.07), ns=2, shortcut=FALSE) } } \references{ Loader, C. (1999) \emph{Local Regression and Likelihood}. Springer, New York. } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{methods} \keyword{smooth} \concept{Bandwidth selection} spatstat.explore/man/Math.fasp.Rd0000644000176200001440000000561314611073323016462 0ustar liggesusers\name{Math.fasp} \alias{Math.fasp} \alias{Ops.fasp} \alias{Complex.fasp} \alias{Summary.fasp} \title{S3 Group Generic Methods for Function Arrays} \description{ These are group generic methods for objects of class \code{"fasp"}, which allows for usual mathematical functions and operators to be applied directly to function arrays. See Details for a list of implemented functions. } \usage{ ## S3 methods for group generics have prototypes: \special{Math(x, \dots)} \special{Ops(e1, e2)} \special{Complex(z)} \special{Summary(\dots, na.rm=FALSE, drop=TRUE)} %NAMESPACE S3method("Math", "fasp") %NAMESPACE S3method("Ops", "fasp") %NAMESPACE S3method("Complex", "fasp") %NAMESPACE S3method("Summary", "fasp") } \arguments{ \item{x, z, e1, e2}{objects of class \code{"fasp"}.} \item{\dots}{further arguments passed to methods.} \item{na.rm}{ Logical value specifying whether missing values should be removed. } } \details{ Below is a list of mathematical functions and operators which are defined for objects of class \code{"fasp"}. The methods are implemented using \code{\link{eval.fasp}}, which tries to harmonise the functions via \code{\link{harmonise.fv}} if they aren't compatible to begin with. \enumerate{ \item Group \code{"Math"}: \itemize{ \item \code{abs}, \code{sign}, \code{sqrt},\cr \code{floor}, \code{ceiling}, \code{trunc},\cr \code{round}, \code{signif} \item \code{exp}, \code{log}, \code{expm1}, \code{log1p},\cr \code{cos}, \code{sin}, \code{tan},\cr \code{cospi}, \code{sinpi}, \code{tanpi},\cr \code{acos}, \code{asin}, \code{atan} \code{cosh}, \code{sinh}, \code{tanh},\cr \code{acosh}, \code{asinh}, \code{atanh} \item \code{lgamma}, \code{gamma}, \code{digamma}, \code{trigamma} \item \code{cumsum}, \code{cumprod}, \code{cummax}, \code{cummin} } \item Group \code{"Ops"}: \itemize{ \item \code{"+"}, \code{"-"}, \code{"*"}, \code{"/"}, \code{"^"}, \code{"\%\%"}, \code{"\%/\%"} \item \code{"&"}, \code{"|"}, \code{"!"} \item \code{"=="}, \code{"!="}, \code{"<"}, \code{"<="}, \code{">="}, \code{">"} } \item Group \code{"Summary"}: \itemize{ \item \code{all}, \code{any} \item \code{sum}, \code{prod} \item \code{min}, \code{max} \item \code{range} } \item Group \code{"Complex"}: \itemize{ \item \code{Arg}, \code{Conj}, \code{Im}, \code{Mod}, \code{Re} } } For the \code{Ops} group, one of the arguments is permitted to be a single atomic value, or a function table, instead of a function array. } \seealso{ \code{\link{eval.fasp}} for evaluating expressions involving function arrays. } \examples{ ## convert array of K functions to array of L functions K <- alltypes(amacrine, "K") L <- sqrt(K/pi) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} spatstat.explore/man/relrisk.Rd0000644000176200001440000000276514611073324016322 0ustar liggesusers\name{relrisk} \alias{relrisk} \title{ Estimate of Spatially-Varying Relative Risk } \description{ Generic command to estimate the spatially-varying probability of each type of point, or the ratios of such probabilities. } \usage{ relrisk(X, \dots) } \arguments{ \item{X}{ Either a point pattern (class \code{"ppp"}) or a fitted point process model (class \code{"ppm"}) from which the probabilities will be estimated. } \item{\dots}{ Additional arguments appropriate to the method. 
} } \details{ In a point pattern containing several different types of points, we may be interested in the spatially-varying probability of each possible type, or the relative risks which are the ratios of such probabilities. The command \code{\link{relrisk}} is generic and can be used to estimate relative risk in different ways. The function \code{\link{relrisk.ppp}} is the method for point pattern datasets. It computes \emph{nonparametric} estimates of relative risk by kernel smoothing. The function \code{\link[spatstat.model]{relrisk.ppm}} is the method for fitted point process models (class \code{"ppm"}). It computes \emph{parametric} estimates of relative risk, using the fitted model. } \seealso{ \code{\link{relrisk.ppp}}, \code{\link[spatstat.model]{relrisk.ppm}}. } \value{ A pixel image, or a list of pixel images, or a numeric vector or matrix, containing the requested estimates of relative risk. } \author{ \spatstatAuthors. } \keyword{spatial} spatstat.explore/man/fvnames.Rd0000644000176200001440000000517114611073324016300 0ustar liggesusers\name{fvnames} \alias{fvnames} \alias{fvnames<-} \title{ Abbreviations for Groups of Columns in Function Value Table } \description{ Groups of columns in a function value table (object of class \code{"fv"}) identified by standard abbreviations. } \usage{ fvnames(X, a = ".") fvnames(X, a = ".") <- value } \arguments{ \item{X}{ Function value table (object of class \code{"fv"}). See \code{\link{fv.object}}. } \item{a}{ One of the standard abbreviations listed below. } \item{value}{ Character vector containing names of columns of \code{X}. } } \details{ An object of class \code{"fv"} represents a table of values of a function, usually a summary function for spatial data such as the \eqn{K}-function, for which several different statistical estimators may be available. The different estimates are stored as columns of the table. Auxiliary information carried in the object \code{X} specifies some columns or groups of columns of this table that should be used for particular purposes. For convenience these groups can be referred to by standard abbreviations which are recognised by various functions in the \pkg{spatstat} package, such as \code{\link{plot.fv}}. These abbreviations are: \tabular{ll}{ \code{".x"} \tab the function argument \cr \code{".y"} \tab the recommended value of the function \cr \code{"."} \tab all function values to be plotted by default \cr \tab (in order of plotting) \cr \code{".s"} \tab the upper and lower limits of shading \cr \tab (for envelopes and confidence intervals)\cr \code{".a"} \tab all function values (in column order) } The command \code{fvnames(X, a)} expands the abbreviation \code{a} and returns a character vector containing the names of the columns. The assignment \code{fvnames(X, a) <- value} changes the definition of the abbreviation \code{a} to the character string \code{value} (which should be the name of another column of \code{X}). The column names of \code{X} are not changed. Note that \code{fvnames(x, ".")} lists the columns of values that will be plotted by default, in the order that they would be plotted, not in order of the column position. The order in which curves are plotted affects the colours and line styles associated with the curves. } \value{ For \code{fvnames}, a character vector. For \code{fvnames<-}, the updated object. 
} \author{\adrian and \rolf } \seealso{ \code{\link{fv.object}}, \code{\link{plot.fv}} } \examples{ K <- Kest(cells) fvnames(K, ".y") fvnames(K, ".y") <- "trans" } \keyword{spatial} \keyword{manip} spatstat.explore/man/clarkevans.test.Rd0000644000176200001440000001066214611073323017750 0ustar liggesusers\name{clarkevans.test} \alias{clarkevans.test} \title{Clark and Evans Test} \description{ Performs the Clark-Evans test of aggregation for a spatial point pattern. } \usage{ clarkevans.test(X, ..., correction, clipregion=NULL, alternative=c("two.sided", "less", "greater", "clustered", "regular"), method=c("asymptotic", "MonteCarlo"), nsim=999) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}). } \item{\dots}{Ignored.} \item{correction}{ Character string. The type of edge correction to be applied. See \code{\link{clarkevans}} and Details below. } \item{clipregion}{ Clipping region for the guard area correction. A window (object of class \code{"owin"}). See \code{\link{clarkevans}}. } \item{alternative}{ String indicating the type of alternative for the hypothesis test. Partially matched. } \item{method}{ Character string (partially matched) specifying how to calculate the \eqn{p}-value of the test. See Details. } \item{nsim}{ Number of Monte Carlo simulations to perform, if a Monte Carlo \eqn{p}-value is required. } } \details{ This command uses the Clark and Evans (1954) aggregation index \eqn{R} as the basis for a crude test of clustering or ordering of a point pattern. The Clark-Evans \emph{aggregation index} \eqn{R} is computed by the separate function \code{\link{clarkevans}}. This command \code{clarkevans.test} performs a hypothesis test of clustering or ordering of the point pattern \code{X} based on the Clark-Evans index \eqn{R}. The null hypothesis is Complete Spatial Randomness, i.e.\ a uniform Poisson process. The alternative hypothesis is specified by the argument \code{alternative}: \itemize{ \item \code{alternative="less"} or \code{alternative="clustered"}: the alternative hypothesis is that \eqn{R < 1} corresponding to a clustered point pattern; \item \code{alternative="greater"} or \code{alternative="regular"}: the alternative hypothesis is that \eqn{R > 1} corresponding to a regular or ordered point pattern; \item \code{alternative="two.sided"}: the alternative hypothesis is that \eqn{R \neq 1}{R != 1} corresponding to a clustered or regular pattern. } The Clark-Evans index \eqn{R} is first computed for the point pattern dataset \code{X} using the edge correction determined by the arguments \code{correction} and \code{clipregion}. These arguments are documented in the help file for \code{\link{clarkevans}}. If \code{method="asymptotic"} (the default), the \eqn{p}-value for the test is computed by standardising \eqn{R} as proposed by Clark and Evans (1954) and referring the standardised statistic to the standard Normal distribution. For this asymptotic test, the default edge correction is \code{correction="Donnelly"} if the window of \code{X} is a rectangle, and \code{correction="cdf"} otherwise. It is strongly recommended to avoid using \code{correction="none"}, which would lead to a severely biased test. If \code{method="MonteCarlo"}, the \eqn{p}-value for the test is computed by comparing the observed value of \eqn{R} to the results obtained from \code{nsim} simulated realisations of Complete Spatial Randomness conditional on the observed number of points.
This test is theoretically exact for any choice of edge correction, but may have lower power than the asymptotic test. For this Monte Carlo test, the default edge correction is \code{correction="none"} for computational efficiency. } \value{ An object of class \code{"htest"} representing the result of the test. } \references{ Clark, P.J. and Evans, F.C. (1954) Distance to nearest neighbour as a measure of spatial relationships in populations. \emph{Ecology} \bold{35}, 445--453. Donnelly, K. (1978) Simulations to determine the variance and edge-effect of total nearest neighbour distance. In \emph{Simulation methods in archaeology}, Cambridge University Press, pp 91--95. } \author{ \adrian } \seealso{ \code{\link{clarkevans}}, \code{\link{hopskel.test}} } \examples{ # Redwood data - clustered clarkevans.test(redwood) clarkevans.test(redwood, alternative="clustered") clarkevans.test(redwood, correction="cdf", method="MonteCarlo", nsim=39) } \keyword{spatial} \keyword{nonparametric} \keyword{htest} \concept{Test of randomness} spatstat.explore/man/localK.Rd0000644000176200001440000001117314611073324016045 0ustar liggesusers\name{localK} \alias{localK} \alias{localL} \title{Neighbourhood density function} \description{ Computes the neighbourhood density function, a local version of the \eqn{K}-function or \eqn{L}-function, defined by Getis and Franklin (1987). } \usage{ localK(X, ..., rmax = NULL, correction = "Ripley", verbose = TRUE, rvalue=NULL) localL(X, ..., rmax = NULL, correction = "Ripley", verbose = TRUE, rvalue=NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{\dots}{Ignored.} \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{correction}{String specifying the edge correction to be applied. Options are \code{"none"}, \code{"translate"}, \code{"translation"}, \code{"Ripley"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{verbose}{Logical flag indicating whether to print progress reports during the calculation. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the function L or K should be computed. } } \details{ The command \code{localL} computes the \emph{neighbourhood density function}, a local version of the \eqn{L}-function (Besag's transformation of Ripley's \eqn{K}-function) that was proposed by Getis and Franklin (1987). The command \code{localK} computes the corresponding local analogue of the K-function. Given a spatial point pattern \code{X}, the neighbourhood density function \eqn{L_i(r)}{L[i](r)} associated with the \eqn{i}th point in \code{X} is computed by \deqn{ L_i(r) = \sqrt{\frac a {(n-1) \pi} \sum_j e_{ij}} }{ L[i](r) = sqrt( (a/((n-1)* pi)) * sum[j] e[i,j]) } where the sum is over all points \eqn{j \neq i}{j != i} that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{a} is the area of the observation window, \eqn{n} is the number of points in \code{X}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The value of \eqn{L_i(r)}{L[i](r)} can also be interpreted as one of the summands that contributes to the global estimate of the L function. By default, the function \eqn{L_i(r)}{L[i](r)} or \eqn{K_i(r)}{K[i](r)} is computed for a range of \eqn{r} values for each point \eqn{i}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X}. 
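For example, the relationship between the local and global estimates can be checked directly (an illustrative sketch, using the \code{swedishpines} dataset):
\preformatted{
  X <- swedishpines
  ## one local K value per point, at distance r = 10
  K10 <- localK(X, rvalue=10, correction="Ripley")
  ## the average of the local values is (approximately) the
  ## global Ripley K estimate at the same distance
  mean(K10)
  Kglobal <- Kest(X, correction="Ripley")
}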
Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X}. Inhomogeneous counterparts of \code{localK} and \code{localL} are computed by \code{localKinhom} and \code{localLinhom}. } \value{ If \code{rvalue} is given, the result is a numeric vector of length equal to the number of points in the point pattern. If \code{rvalue} is absent, the result is an object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point. The last two columns contain the \code{r} and \code{theo} values. } \references{ Getis, A. and Franklin, J. (1987) Second-order neighbourhood analysis of mapped point patterns. \emph{Ecology} \bold{68}, 473--477. } \seealso{ \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{localKinhom}}, \code{\link{localLinhom}}. } \examples{ X <- ponderosa # compute all the local L functions L <- localL(X) # plot all the local L functions against r plot(L, main="local L functions for ponderosa", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) # compute the values of L(r) for r = 12 metres L12 <- localL(X, rvalue=12) # Spatially interpolate the values of L12 # Compare Figure 5(b) of Getis and Franklin (1987) X12 <- X \%mark\% L12 Z <- Smooth(X12, sigma=5, dimyx=128) plot(Z, col=topo.colors(128), main="smoothed neighbourhood density") contour(Z, add=TRUE) points(X, pch=16, cex=0.5) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Jinhom.Rd0000644000176200001440000001607114643125461016073 0ustar liggesusers\name{Jinhom} \alias{Jinhom} \title{ Inhomogeneous J-function } \description{ Estimates the inhomogeneous \eqn{J} function of a non-stationary point pattern. } \usage{ Jinhom(X, lambda = NULL, lmin = NULL, ..., sigma = NULL, varcov = NULL, r = NULL, breaks = NULL, ratio=FALSE, update = TRUE, warn.bias=TRUE, savelambda=FALSE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{J} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link[spatstat.geom]{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lmin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. 
} \item{\dots}{ Extra arguments passed to \code{\link[spatstat.geom]{as.mask}} to control the pixel resolution, or passed to \code{\link{density.ppp}} to control the smoothing bandwidth. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{K} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns. } \item{update}{ Logical. If \code{lambda} is a fitted model (class \code{"ppm"} or \code{"kppm"}) and \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link[spatstat.model]{update.ppm}} or \code{\link[spatstat.model]{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without fitting it to \code{X}. } \item{warn.bias}{ Logical value specifying whether to issue a warning when the inhomogeneity correction factor takes extreme values, which can often lead to biased results. This usually occurs when insufficient smoothing is used to estimate the intensity. } \item{savelambda}{ Logical value specifying whether to save the values of \code{lmin} and \code{lambda} as attributes of the result. } } \details{ This command computes estimates of the inhomogeneous \eqn{J}-function (Van Lieshout, 2010) of a point pattern. It is the counterpart, for inhomogeneous spatial point patterns, of the \eqn{J} function for homogeneous point patterns computed by \code{\link{Jest}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). The inhomogeneous \eqn{J} function is computed as \eqn{Jinhom(r) = (1 - Ginhom(r))/(1-Finhom(r))} where \eqn{Ginhom, Finhom} are the inhomogeneous \eqn{G} and \eqn{F} functions computed using the border correction (equations (7) and (6) respectively in Van Lieshout, 2010). The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda} of the point process. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a fitted point process model}{ (object of class \code{"ppm"} or \code{"kppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. 
If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. van Lieshout, M.N.M. (2010) A J-function for inhomogeneous point processes. \emph{Statistica Neerlandica} \bold{65}, 183--201. } \seealso{ \code{\link{Ginhom}}, \code{\link{Finhom}}, \code{\link{Jest}} } \examples{ online <- interactive() if(online) { plot(Jinhom(swedishpines, sigma=10)) plot(Jinhom(swedishpines, sigma=bw.diggle, adjust=2)) } else { ## use a coarse grid for faster computation and package testing plot(Jinhom(swedishpines, sigma=10, dimyx=32)) } } \author{ Original code by Marie-Colette van Lieshout. C implementation and R adaptation by \adrian and \ege. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/miplot.Rd0000644000176200001440000000400114643125461016141 0ustar liggesusers\name{miplot} \alias{miplot} \title{Morisita Index Plot} \description{ Displays the Morisita Index Plot of a spatial point pattern. } \usage{ miplot(X, ...) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) or something acceptable to \code{\link[spatstat.geom]{as.ppp}}. } \item{\dots}{Optional arguments to control the appearance of the plot.} } \details{ Morisita (1959) defined an index of spatial aggregation for a spatial point pattern based on quadrat counts. The spatial domain of the point pattern is first divided into \eqn{Q} subsets (quadrats) of equal size and shape. The numbers of points falling in each quadrat are counted. Then the Morisita Index is computed as \deqn{ \mbox{MI} = Q \frac{\sum_{i=1}^Q n_i (n_i - 1)}{N(N-1)} }{ MI = Q * sum(n[i] (n[i]-1))/(N(N-1)) } where \eqn{n_i}{n[i]} is the number of points falling in the \eqn{i}-th quadrat, and \eqn{N} is the total number of points. If the pattern is completely random, \code{MI} should be approximately equal to 1. Values of \code{MI} greater than 1 suggest clustering. The \emph{Morisita Index plot} is a plot of the Morisita Index \code{MI} against the linear dimension of the quadrats. The point pattern dataset is divided into \eqn{2 \times 2}{2 * 2} quadrats, then \eqn{3 \times 3}{3 * 3} quadrats, etc, and the Morisita Index is computed each time. 
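For a single grid of quadrats, the index can be computed directly from the quadrat counts (an illustrative sketch, using the \code{cells} dataset):
\preformatted{
  X <- cells
  counts <- as.vector(quadratcount(X, nx=3, ny=3))  # 3 x 3 grid
  N <- npoints(X)
  Q <- length(counts)
  ## Morisita Index as defined above
  MI <- Q * sum(counts * (counts - 1)) / (N * (N - 1))
  MI   # values appreciably greater than 1 suggest clustering
}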
This plot is an attempt to discern different scales of dependence in the point pattern data. } \value{ None. } \references{ M. Morisita (1959) Measuring of the dispersion of individuals and analysis of the distributional patterns. Memoir of the Faculty of Science, Kyushu University, Series E: Biology. \bold{2}: 215--235. } \seealso{ \code{\link[spatstat.geom]{quadratcount}} } \examples{ miplot(longleaf) opa <- par(mfrow=c(2,3)) plot(cells) plot(japanesepines) plot(redwood) miplot(cells) miplot(japanesepines) miplot(redwood) par(opa) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Extract.ssf.Rd0000644000176200001440000000145614611073324017047 0ustar liggesusers\name{[.ssf} \alias{[.ssf} \title{ Subset of spatially sampled function } \description{ Extract a subset of the data for a spatially sampled function. } \usage{ \method{[}{ssf}(x, i, j, ..., drop) } \arguments{ \item{x}{ Object of class \code{"ssf"}. } \item{i}{ Subset index applying to the locations where the function is sampled. } \item{j}{ Subset index applying to the columns (variables) measured at each location. } \item{\dots, drop}{ Ignored. } } \details{ This is the subset operator for the class \code{"ssf"}. } \value{ Another object of class \code{"ssf"}. } \author{ \adrian. } \seealso{ \code{\link{ssf}}, \code{\link{with.ssf}} } \examples{ f <- ssf(cells, data.frame(d=nndist(cells), i=1:42)) f f[1:10,] f[ ,1] } \keyword{spatial} \keyword{manip} spatstat.explore/man/Ksector.Rd0000644000176200001440000000557714643125461016272 0ustar liggesusers\name{Ksector} \alias{Ksector} \title{Sector K-function} \description{ A directional counterpart of Ripley's \eqn{K} function, in which pairs of points are counted only when the vector joining the pair happens to lie in a particular range of angles. } \usage{ Ksector(X, begin = 0, end = 360, \dots, units = c("degrees", "radians"), r = NULL, breaks = NULL, correction = c("border", "isotropic", "Ripley", "translate"), domain=NULL, ratio = FALSE, verbose=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{K(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link[spatstat.geom]{as.ppp}()}. } \item{begin,end}{ Numeric values giving the range of angles inside which points will be counted. Angles are measured in degrees (if \code{units="degrees"}, the default) or radians (if \code{units="radians"}) anti-clockwise from the positive \eqn{x}-axis. } \item{\dots}{Ignored.} \item{units}{ Units in which the angles \code{begin} and \code{end} are expressed. } \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{K(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. A character vector containing any selection of the options \code{"none"}, \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"}, \code{"good"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{domain}{ Optional window. The first point \eqn{x_i}{x[i]} of each pair of points will be constrained to lie in \code{domain}. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. 
} \item{verbose}{ Logical value indicating whether to print progress reports and warnings. } } \details{ This is a directional counterpart of Ripley's \eqn{K} function (see \code{\link{Kest}}) in which, instead of counting all pairs of points within a specified distance \eqn{r}, we count only the pairs \eqn{(x_i, x_j)}{x[i], x[j]} for which the vector \eqn{x_j - x_i}{x[j] - x[i]} falls in a particular range of angles. This can be used to evaluate evidence for anisotropy in the point pattern \code{X}. } \value{ An object of class \code{"fv"} containing the estimated function. } \seealso{ \code{\link{Kest}} } \examples{ K <- Ksector(swedishpines, 0, 90) plot(K) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/GmultiInhom.Rd0000644000176200001440000000702214643125461017077 0ustar liggesusers\name{GmultiInhom} \alias{GmultiInhom} \alias{Gmulti.inhom} \title{ Inhomogeneous Marked G-Function } \description{ For a marked point pattern, estimate the inhomogeneous version of the multitype \eqn{G} function, effectively the cumulative distribution function of the distance from a point in subset \eqn{I} to the nearest point in subset \eqn{J}, adjusted for spatially varying intensity. } \usage{ Gmulti.inhom(X, I, J, lambda = NULL, lambdaI = NULL, lambdaJ = NULL, lambdamin = NULL, \dots, r = NULL, ReferenceMeasureMarkSetI = NULL, ratio = FALSE) GmultiInhom(X, I, J, lambda = NULL, lambdaI = NULL, lambdaJ = NULL, lambdamin = NULL, \dots, r = NULL, ReferenceMeasureMarkSetI = NULL, ratio = FALSE) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}. } \item{I}{ A subset index specifying the subset of points \emph{from} which distances are measured. Any kind of subset index acceptable to \code{\link[spatstat.geom]{[.ppp}}. } \item{J}{ A subset index specifying the subset of points \emph{to} which distances are measured. Any kind of subset index acceptable to \code{\link[spatstat.geom]{[.ppp}}. } \item{lambda}{ Intensity estimates for each point of \code{X}. A numeric vector of length equal to \code{npoints(X)}. Incompatible with \code{lambdaI,lambdaJ}. } \item{lambdaI}{ Intensity estimates for each point of \code{X[I]}. A numeric vector of length equal to \code{npoints(X[I])}. Incompatible with \code{lambda}. } \item{lambdaJ}{ Intensity estimates for each point of \code{X[J]}. A numeric vector of length equal to \code{npoints(X[J])}. Incompatible with \code{lambda}. } \item{lambdamin}{ A lower bound for the intensity, or at least a lower bound for the values in \code{lambdaJ} or \code{lambda[J]}. } \item{\dots}{ Ignored. } \item{r}{ Vector of distance values at which the inhomogeneous \eqn{G} function should be estimated. There is a sensible default. } \item{ReferenceMeasureMarkSetI}{ Optional. The total measure of the mark set. A positive number. } \item{ratio}{ Logical value indicating whether to save ratio information. } } \details{ See Cronie and Van Lieshout (2015). The functions \code{GmultiInhom} and \code{Gmulti.inhom} are identical. } \value{ Object of class \code{"fv"} containing the estimate of the inhomogeneous multitype \eqn{G} function. } \references{ Cronie, O. and Van Lieshout, M.N.M. (2015) Summary statistics for inhomogeneous marked point processes. \emph{Annals of the Institute of Statistical Mathematics} DOI: 10.1007/s10463-015-0515-z } \author{ Ottmar Cronie and Marie-Colette van Lieshout. Rewritten for \pkg{spatstat} by \adrian. 
} \seealso{ \code{\link{Ginhom}}, \code{\link{Gmulti}} } \examples{ X <- rescale(amacrine) I <- (marks(X) == "on") J <- (marks(X) == "off") if(interactive() && require(spatstat.model)) { ## how to do it normally mod <- ppm(X ~ marks * x) lam <- fitted(mod, dataonly=TRUE) lmin <- min(predict(mod)[["off"]]) * 0.9 } else { ## for package testing lam <- intensity(X)[as.integer(marks(X))] lmin <- intensity(X)[2] * 0.9 } plot(GmultiInhom(X, I, J, lambda=lam, lambdamin=lmin)) # equivalent plot(GmultiInhom(X, I, J, lambdaI=lam[I], lambdaJ=lam[J], lambdamin=lmin), main="") } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/pcfdot.inhom.Rd0000644000176200001440000001252114611073324017226 0ustar liggesusers\name{pcfdot.inhom} \alias{pcfdot.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Type-i-To-Any-Type) } \description{ Estimates the inhomogeneous multitype pair correlation function (from type \eqn{i} to any type) for a multitype point pattern. } \usage{ pcfdot.inhom(X, i, lambdaI = NULL, lambdadot = NULL, ..., r = NULL, breaks = NULL, kernel="epanechnikov", bw=NULL, adjust.bw=1, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma = NULL, adjust.sigma = 1, varcov = NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous multitype pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity function of the points of type \code{i}. Either a vector giving the intensity values at the points of type \code{i}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdadot}{ Optional. Values of the estimated intensity function of the point pattern \code{X}. A numeric vector, pixel image or \code{function(x,y)}. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. There is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{kernel}{ Choice of one-dimensional smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for one-dimensional smoothing kernel, passed to \code{\link{density.default}}. } \item{adjust.bw}{ Numeric value. \code{bw} will be multiplied by this value. } \item{\dots}{ Other arguments passed to the one-dimensional kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Bandwidth coefficient; see Details. } \item{correction}{ Choice of edge correction. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambdaI} and/or \code{lambdadot} is estimated by spatial kernel smoothing. } \item{adjust.sigma}{ Numeric value. \code{sigma} will be multiplied by this value. } } \details{ The inhomogeneous multitype (type \eqn{i} to any type) pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} is a summary of the dependence between different types of points in a multitype spatial point process that does not have a uniform density of points. 
The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x} and another point of any type at location \eqn{y}, where \eqn{x} and \eqn{y} are separated by a distance \eqn{r}, is equal to \deqn{ p(r) = \lambda_i(x) lambda(y) g(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i](x) * lambda(y) * g(r) dx dy } where \eqn{\lambda_i}{lambda[i]} is the intensity function of the process of points of type \eqn{i}, and where \eqn{\lambda}{lambda} is the intensity function of the points of all types. For a multitype Poisson point process, this probability is \eqn{p(r) = \lambda_i(x) \lambda(y)}{p(r) = lambda[i](x) * lambda(y)} so \eqn{g_{i\bullet}(r) = 1}{g[i.](r) = 1}. The command \code{pcfdot.inhom} estimates the inhomogeneous multitype pair correlation using a modified version of the algorithm in \code{\link{pcf.ppp}}. The arguments \code{bw} and \code{adjust.bw} control the degree of one-dimensional smoothing of the estimate of pair correlation. If the arguments \code{lambdaI} and/or \code{lambdadot} are missing or null, they will be estimated from \code{X} by spatial kernel smoothing using a leave-one-out estimator, computed by \code{\link{density.ppp}}. The arguments \code{sigma}, \code{varcov} and \code{adjust.sigma} control the degree of spatial smoothing. } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{ the vector of values of the argument \eqn{r} at which the inhomogeneous multitype pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g_{i\bullet}(r)}{g[i.](r)} for the Poisson process } \item{trans}{vector of values of \eqn{g_{i\bullet}(r)}{g[i.](r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g_{i\bullet}(r)}{g[i.](r)} estimated by Ripley isotropic correction } as required. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcfinhom}}, \code{\link{pcfdot}}, \code{\link{pcfcross.inhom}} } \examples{ plot(pcfdot.inhom(amacrine, "on", stoyan=0.1), legendpos="bottom") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/auc.Rd0000644000176200001440000000517614650323373015423 0ustar liggesusers\name{auc} \alias{auc} \alias{auc.ppp} \title{ Area Under ROC Curve } \description{ Compute the AUC (area under the Receiver Operating Characteristic curve) for an observed point pattern. } \usage{ auc(X, \dots) \method{auc}{ppp}(X, covariate, \dots, high = TRUE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"} or \code{"lpp"}) or fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"slrm"} or \code{"lppm"}). } \item{covariate}{ Spatial covariate. Either a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or one of the strings \code{"x"} or \code{"y"} indicating the Cartesian coordinates. } \item{high}{ Logical value indicating whether the threshold operation should favour high or low values of the covariate. } \item{\dots}{ Arguments passed to \code{\link[spatstat.geom]{as.mask}} controlling the pixel resolution for calculations. } } \details{ This command computes the AUC, the area under the Receiver Operating Characteristic curve. The ROC itself is computed by \code{\link[spatstat.explore]{roc}}. 
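A typical call supplies a point pattern and a spatial covariate (an illustrative sketch, using the \code{bei} forest data and its accompanying covariate images):
\preformatted{
  ## AUC of tree locations with respect to terrain elevation and slope
  auc(bei, bei.extra$elev)
  auc(bei, bei.extra$grad)
}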
For a point pattern \code{X} and a covariate \code{Z}, the AUC is a numerical index that measures the ability of the covariate to separate the spatial domain into areas of high and low density of points. Let \eqn{x_i}{x[i]} be a randomly-chosen data point from \code{X} and \eqn{U} a randomly-selected location in the study region. The AUC is the probability that \eqn{Z(x_i) > Z(U)}{Z(x[i]) > Z(U)} assuming \code{high=TRUE}. That is, AUC is the probability that a randomly-selected data point has a higher value of the covariate \code{Z} than does a randomly-selected spatial location. The AUC is a number between 0 and 1. A value of 0.5 indicates a complete lack of discriminatory power. } \value{ Numeric. For \code{auc.ppp} and \code{auc.lpp}, the result is a single number giving the AUC value. } \references{ Lobo, J.M., \ifelse{latex}{\out{Jim{\'e}nez}}{Jimenez}-Valverde, A. and Real, R. (2007) AUC: a misleading measure of the performance of predictive distribution models. \emph{Global Ecology and Biogeography} \bold{17}(2) 145--151. Nam, B.-H. and D'Agostino, R. (2002) Discrimination index, the area under the {ROC} curve. Pages 267--279 in Huber-Carol, C., Balakrishnan, N., Nikulin, M.S. and Mesbah, M., \emph{Goodness-of-fit tests and model validity}, \ifelse{latex}{\out{Birkh{\"a}user}}{Birkhauser}, Basel. } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.explore]{roc}} } \examples{ auc(swedishpines, "x") } \keyword{spatial} spatstat.explore/man/envelope.pp3.Rd0000644000176200001440000002250614643125461017165 0ustar liggesusers\name{envelope.pp3} \alias{envelope.pp3} \title{Simulation Envelopes of Summary Function for 3D Point Pattern} \description{ Computes simulation envelopes of a summary function for a three-dimensional point pattern. } \usage{ \method{envelope}{pp3}(Y, fun=K3est, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, verbose=TRUE, transform=NULL,global=FALSE,ginterval=NULL,use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) } \arguments{ \item{Y}{ A three-dimensional point pattern (object of class \code{"pp3"}). } \item{fun}{ Function that computes the desired summary statistic for a 3D point pattern. } \item{nsim}{ Number of simulated point patterns to be generated when computing the envelopes. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{\dots}{ Extra arguments passed to \code{fun}. } \item{funargs}{ A list, containing extra arguments to be passed to \code{fun}. } \item{funYargs}{ Optional. A list, containing extra arguments to be passed to \code{fun} when applied to the original data \code{Y} only. } \item{simulate}{ Optional. Specifies how to generate the simulated point patterns. If \code{simulate} is an expression in the R language, then this expression will be evaluated \code{nsim} times, to obtain \code{nsim} point patterns which are taken as the simulated patterns from which the envelopes are computed. If \code{simulate} is a function, then this function will be repeatedly applied to the data pattern \code{Y} to obtain \code{nsim} simulated patterns. 
If \code{simulate} is a list of point patterns, then the entries in this list will be treated as the simulated patterns from which the envelopes are computed. Alternatively \code{simulate} may be an object produced by the \code{envelope} command: see Details. } \item{verbose}{ Logical flag indicating whether to print progress reports during the simulations. } \item{transform}{ Optional. A transformation to be applied to the function values, before the envelopes are computed. An expression object (see Details). } \item{global}{ Logical flag indicating whether envelopes should be pointwise (\code{global=FALSE}) or simultaneous (\code{global=TRUE}). } \item{ginterval}{ Optional. A vector of length 2 specifying the interval of \eqn{r} values for the simultaneous critical envelopes. Only relevant if \code{global=TRUE}. } \item{use.theory}{ Logical value indicating whether to use the theoretical value, computed by \code{fun}, as the reference value for simultaneous envelopes. Applicable only when \code{global=TRUE}. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{side="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{side="less"}) or a one-sided test with an upper critical boundary (\code{side="greater"}). } \item{scale}{ Optional. Scaling function for global envelopes. A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}, when computing the global envelopes. Applicable only when \code{global=TRUE}. Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the maximum deviation is computed. The resulting global envelopes will have width proportional to \code{scale(r)}. } \item{clamp}{ Logical value indicating how to compute envelopes when \code{alternative="less"} or \code{alternative="greater"}. Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. If \code{clamp=FALSE} (the default), these values are not changed. If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } \item{nsim2}{ Number of extra simulated point patterns to be generated if it is necessary to use simulation to estimate the theoretical mean of the summary function. Only relevant when \code{global=TRUE} and the simulations are not based on CSR. } \item{VARIANCE}{ Logical. If \code{TRUE}, critical envelopes will be calculated as sample mean plus or minus \code{nSD} times sample standard deviation. } \item{nSD}{ Number of estimated standard deviations used to determine the critical envelopes, if \code{VARIANCE=TRUE}. } \item{Yname}{ Character string that should be used as the name of the data point pattern \code{Y} when printing or plotting the results. } \item{maxnerr}{ Maximum number of rejected patterns. If \code{fun} yields a fatal error when applied to a simulated point pattern (for example, because the pattern is empty and \code{fun} requires at least one point), the pattern will be rejected and a new random point pattern will be generated. If this happens more than \code{maxnerr} times, the algorithm will give up. 
} \item{rejectNA}{ Logical value specifying whether to reject a simulated pattern if the resulting values of \code{fun} are all equal to \code{NA}, \code{NaN} or infinite. If \code{FALSE} (the default), then simulated patterns are only rejected when \code{fun} gives a fatal error. } \item{silent}{ Logical value specifying whether to print a report each time a simulated pattern is rejected. } \item{do.pwrong}{ Logical. If \code{TRUE}, the algorithm will also estimate the true significance level of the \dQuote{wrong} test (the test that declares the summary function for the data to be significant if it lies outside the \emph{pointwise} critical boundary at any point). This estimate is printed when the result is printed. } \item{envir.simul}{ Environment in which to evaluate the expression \code{simulate}, if not the current environment. } } \value{ A function value table (object of class \code{"fv"}) which can be plotted directly. See \code{\link{envelope}} for further details. } \details{ The \code{envelope} command performs simulations and computes envelopes of a summary statistic based on the simulations. The result is an object that can be plotted to display the envelopes. The envelopes can be used to assess the goodness-of-fit of a point process model to point pattern data. The \code{envelope} function is generic, with methods for the classes \code{"ppp"}, \code{"ppm"} and \code{"kppm"} described in the help file for \code{\link{envelope}}. This function \code{envelope.pp3} is the method for three-dimensional point patterns (objects of class \code{"pp3"}). For the most basic use, if you have a 3D point pattern \code{X} and you want to test Complete Spatial Randomness (CSR), type \code{plot(envelope(X, K3est,nsim=39))} to see the three-dimensional \eqn{K} function for \code{X} plotted together with the envelopes of the three-dimensional \eqn{K} function for 39 simulations of CSR. To create simulation envelopes, the command \code{envelope(Y, ...)} first generates \code{nsim} random point patterns in one of the following ways. \itemize{ \item If \code{simulate=NULL}, then we generate \code{nsim} simulations of Complete Spatial Randomness (i.e. \code{nsim} simulated point patterns each being a realisation of the uniform Poisson point process) with the same intensity as the pattern \code{Y}. \item If \code{simulate} is supplied, then it determines how the simulated point patterns are generated. See \code{\link{envelope}} for details. } The summary statistic \code{fun} is applied to each of these simulated patterns. Typically \code{fun} is one of the functions \code{K3est}, \code{G3est}, \code{F3est} or \code{pcf3est}. It may also be a character string containing the name of one of these functions. For further information, see the documentation for \code{\link{envelope}}. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. } \seealso{ \code{\link[spatstat.geom]{pp3}}, \code{\link[spatstat.random]{rpoispp3}}, \code{\link{K3est}}, \code{\link{G3est}}, \code{\link{F3est}}, \code{\link{pcf3est}}. 
} \examples{ X <- rpoispp3(20, box3()) if(interactive()) { plot(envelope(X, nsim=39)) } \testonly{ plot(envelope(X, nsim=4)) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} \concept{Three-dimensional} \concept{Goodness-of-fit} \concept{Test of randomness} \concept{Envelope of simulations} spatstat.explore/man/Gcross.Rd0000644000176200001440000002213214643125461016102 0ustar liggesusers\name{Gcross} \alias{Gcross} \title{ Multitype Nearest Neighbour Distance Function (i-to-j) } \description{ For a multitype point pattern, estimate the distribution of the distance from a point of type \eqn{i} to the nearest point of type \eqn{j}. } \usage{ Gcross(X, i, j, r=NULL, breaks=NULL, \dots, correction=c("rs", "km", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type distance distribution function \eqn{G_{ij}(r)}{Gij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{G_{ij}(r)}{Gij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{ Ignored. } \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{G_{ij}(r)}{Gij(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G_{ij}(r)}{Gij(r)} } \item{han}{the Hanisch-style estimator of \eqn{G_{ij}(r)}{Gij(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{ij}(r)}{Gij(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G_{ij}(r)}{Gij(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G_{ij}(r)}{Gij(r)}, i.e. the empirical distribution of the distances from each point of type \eqn{i} to the nearest point of type \eqn{j} } \item{theo}{the theoretical value of \eqn{G_{ij}(r)}{Gij(r)} for a marked Poisson process with the same estimated intensity (see below). } } \details{ This function \code{Gcross} and its companions \code{\link{Gdot}} and \code{\link{Gmulti}} are generalisations of the function \code{\link{Gest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. 
In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). The ``cross-type'' (type \eqn{i} to type \eqn{j}) nearest neighbour distance distribution function of a multitype point process is the cumulative distribution function \eqn{G_{ij}(r)}{Gij(r)} of the distance from a typical random point of the process with type \eqn{i} the nearest point of type \eqn{j}. An estimate of \eqn{G_{ij}(r)}{Gij(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the process of type \eqn{i} points were independent of the process of type \eqn{j} points, then \eqn{G_{ij}(r)}{Gij(r)} would equal \eqn{F_j(r)}{Fj(r)}, the empty space function of the type \eqn{j} points. For a multitype Poisson point process where the type \eqn{i} points have intensity \eqn{\lambda_i}{lambda[i]}, we have \deqn{G_{ij}(r) = 1 - e^{ - \lambda_j \pi r^2} }{% Gij(r) = 1 - exp( - lambda[j] * pi * r^2)} Deviations between the empirical and theoretical \eqn{G_{ij}}{Gij} curves may suggest dependence between the points of types \eqn{i} and \eqn{j}. This algorithm estimates the distribution function \eqn{G_{ij}(r)}{Gij(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Gest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{ij}(r)}{Gij(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{ij}(r)}{Gij(r)}. This estimate should be used with caution as \eqn{G_{ij}(r)}{Gij(r)} is not necessarily differentiable. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G_{ij}}{Gij}. However this is also returned by the algorithm, as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{G_{ij}}{Gij} as if it were an unbiased estimator of \eqn{G_{ij}}{Gij}. 
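As a quick numerical check, the Poisson benchmark quoted above can be reproduced from the estimated intensity of the type \eqn{j} points (an illustrative sketch using the \code{amacrine} data):
\preformatted{
  ## Gij(r) = 1 - exp(-lambda[j] * pi * r^2) for a marked Poisson process
  G <- Gcross(amacrine, "off", "on")
  lambda.on <- intensity(split(amacrine)$on)
  theo.check <- 1 - exp(-lambda.on * pi * G$r^2)
  ## 'theo.check' should (approximately) agree with the 'theo' column of G
  max(abs(theo.check - G$theo))
}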
} \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. The function \eqn{G_{ij}}{Gij} does not necessarily have a density. The reduced sample estimator of \eqn{G_{ij}}{Gij} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G_{ij}}{Gij} is always nondecreasing but its maximum value may be less than \eqn{1}. } \seealso{ \code{\link{Gdot}}, \code{\link{Gest}}, \code{\link{Gmulti}} } \examples{ # amacrine cells data G01 <- Gcross(amacrine) # equivalent to: \donttest{ G01 <- Gcross(amacrine, "off", "on") } plot(G01) # empty space function of `on' points if(interactive()) { F1 <- Fest(split(amacrine)$on, r = G01$r) lines(F1$r, F1$km, lty=3) } # synthetic example pp <- runifpoispp(30) pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE)) G <- Gcross(pp, "0", "1") # note: "0" not 0 } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/with.ssf.Rd0000644000176200001440000000301614611073325016403 0ustar liggesusers\name{with.ssf} \alias{with.ssf} \alias{apply.ssf} \title{ Evaluate Expression in a Spatially Sampled Function } \description{ Given a spatially sampled function, evaluate an expression involving the function values. } \usage{ apply.ssf(X, \dots) \method{with}{ssf}(data, \dots) } \arguments{ \item{X, data}{ A spatially sampled function (object of class \code{"ssf"}). } \item{\dots}{ Arguments passed to \code{\link{with.default}} or \code{\link{apply}} specifying what to compute. } } \details{ An object of class \code{"ssf"} represents a function (real- or vector-valued) that has been sampled at a finite set of points. It contains a data frame which provides the function values at the sample points. In \code{with.ssf}, the expression specified by \code{\dots} will be evaluated in this dataframe. In \code{apply.ssf}, the dataframe will be subjected to the \code{\link{apply}} operator using the additional arguments \code{\dots}. 
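For instance, assuming the extra arguments are passed to \code{\link{apply}} in the usual order (an illustrative sketch):
\preformatted{
  f <- ssf(cells, data.frame(d=nndist(cells), i=1:npoints(cells)))
  apply.ssf(f, 2, mean)   # a summary value for each column
  apply.ssf(f, 1, sum)    # one value for each sample point
}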
If the result of evaluation is a data frame with one row for each data point, or a numeric vector with one entry for each data point, then the result will be an object of class \code{"ssf"} containing this information. Otherwise, the result will be a numeric vector. } \value{ An object of class \code{"ssf"} or a numeric vector. } \author{ \adrian. } \seealso{ \code{\link{ssf}} } \examples{ a <- ssf(cells, data.frame(d=nndist(cells), i=1:npoints(cells))) with(a, i/d) } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat.explore/man/Smooth.Rd0000644000176200001440000000173214611073325016112 0ustar liggesusers\name{Smooth} \alias{Smooth} \title{Spatial smoothing of data} \description{ Generic function to perform spatial smoothing of spatial data. } \usage{ Smooth(X, ...) } \arguments{ \item{X}{Some kind of spatial data} \item{\dots}{Arguments passed to methods.} } \details{ This generic function calls an appropriate method to perform spatial smoothing on the spatial dataset \code{X}. Methods for this function include \itemize{ \item \code{\link{Smooth.ppp}} for point patterns \item \code{\link[spatstat.model]{Smooth.msr}} for measures \item \code{\link{Smooth.fv}} for function value tables } } \seealso{ \code{\link{Smooth.ppp}}, \code{\link{Smooth.im}}, \code{\link[spatstat.model]{Smooth.msr}}, \code{\link{Smooth.fv}}. } \value{ An object containing smoothed values of the input data, in an appropriate format. See the documentation for the methods. } \author{ \adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.explore/man/pool.fv.Rd0000644000176200001440000000317214611073324016223 0ustar liggesusers\name{pool.fv} \alias{pool.fv} \title{Pool Several Functions} \description{ Combine several summary functions into a single function. } \usage{ \method{pool}{fv}(..., weights=NULL, relabel=TRUE, variance=TRUE) } \arguments{ \item{\dots}{ Objects of class \code{"fv"}. } \item{weights}{ Optional numeric vector of weights for the functions. } \item{relabel}{ Logical value indicating whether the columns of the resulting function should be labelled to show that they were obtained by pooling. } \item{variance}{ Logical value indicating whether to compute the sample variance and related terms. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"fv"} of summary functions. It is used to combine several estimates of the same function into a single function. Each of the arguments \code{\dots} must be an object of class \code{"fv"}. They must be compatible, in that they are estimates of the same function, and were computed using the same options. The sample mean and sample variance of the corresponding estimates will be computed. } \value{ An object of class \code{"fv"}. } \seealso{ \code{\link{pool}}, \code{\link{pool.anylist}}, \code{\link{pool.rat}} } \examples{ K <- lapply(waterstriders, Kest, correction="iso") Kall <- pool(K[[1]], K[[2]], K[[3]]) Kall <- pool(as.anylist(K)) plot(Kall, cbind(pooliso, pooltheo) ~ r, shade=c("loiso", "hiiso"), main="Pooled K function of waterstriders") } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat.explore/man/spatcov.Rd0000644000176200001440000001111214643125461016315 0ustar liggesusers\name{spatcov} \alias{spatcov} \title{ Estimate the Spatial Covariance Function of a Random Field } \description{ Given a pixel image, calculate an estimate of the spatial covariance function. 
Given two pixel images, calculate an estimate of their spatial cross-covariance function. } \usage{ spatcov(X, Y=X, \dots, correlation=FALSE, isotropic = TRUE, clip = TRUE, pooling=TRUE) } \arguments{ \item{X}{ A pixel image (object of class \code{"im"}). } \item{Y}{ Optional. Another pixel image. } \item{correlation}{ Logical value specifying whether to standardise so that the spatial correlation function is returned. } \item{isotropic}{ Logical value specifying whether to assume the covariance is isotropic, so that the result is a function of the lag distance. } \item{clip}{ Logical value specifying whether to restrict the results to the range of spatial lags where the estimate is reliable. } \item{pooling}{ Logical value specifying the estimation method when \code{isotropic=TRUE}. } \item{\dots}{Ignored.} } \details{ In normal usage, only the first argument \code{X} is given. Then the pixel image \code{X} is treated as a realisation of a stationary random field, and its spatial covariance function is estimated. Alternatively if \code{Y} is given, then \code{X} and \code{Y} are assumed to be jointly stationary random fields, and their spatial cross-covariance function is estimated. For any random field \code{X}, the spatial covariance is defined for any two spatial locations \eqn{u} and \eqn{v} by \deqn{ C(u,v) = \mbox{cov}(X(u), X(v)) }{ C(u,v) = cov(X(u), X(v)) } where \eqn{X(u)} and \eqn{X(v)} are the values of the random field at those locations. Here \eqn{\mbox{cov}}{cov} denotes the statistical covariance, defined for any random variables \eqn{A} and \eqn{B} by \eqn{\mbox{cov}(A,B) = E(AB) - E(A) E(B)}{cov(A,B) = E(AB) - E(A) E(B)} where \eqn{E(A)} denotes the expected value of \eqn{A}. If the random field is assumed to be stationary (at least second-order stationary) then the spatial covariance \eqn{C(u,v)} depends only on the lag vector \eqn{v-u}: \deqn{ C(u,v) = C_2(v-u) }{ C(u,v) = C2(v-u) } where \eqn{C_2}{C2} is a function of a single vector argument. If the random field is stationary and isotropic, then the spatial covariance depends only on the lag distance \eqn{\| v - u \|}{||v-u||}: \deqn{ C_2(v-u) = C_1(\|v-u\|) }{ C2(v-u) = C1(||v-u||) } where \eqn{C_1}{C1} is a function of distance. The function \code{spatcov} computes estimates of the covariance function \eqn{C_1}{C1} or \eqn{C_2}{C2} as follows: \itemize{ \item If \code{isotropic=FALSE}, an estimate of the covariance function \eqn{C_2}{C2} is computed, assuming the random field is stationary, using the naive moment estimator, \code{C2 = imcov(X-mean(X))/setcov(Window(X))}. The result is a pixel image. \item If \code{isotropic=TRUE} (the default) an estimate of the covariance function \eqn{C_1}{C1} is computed, assuming the random field is stationary and isotropic. \itemize{ \item When \code{pooling=FALSE}, the estimate of \eqn{C_1}{C1} is the rotational average of the naive estimate of \eqn{C_2}{C2}. \item When \code{pooling=TRUE} (the default), the estimate of \eqn{C_1}{C1} is the ratio of the rotational averages of the numerator and denominator which form the naive estimate of \eqn{C_2}{C2}. } The result is a function object (class \code{"fv"}). } If the argument \code{Y} is given, it should be a pixel image compatible with \code{X}. An estimate of the spatial cross-covariance function between \code{X} and \code{Y} will be computed.
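For example, here is a brief illustrative sketch of the cross-covariance usage; the two smoothed images are an arbitrary choice, and any pair of compatible pixel images would serve equally well:

  D1 <- density(cells, sigma=0.05)
  D2 <- density(cells, sigma=0.10)
  # isotropic spatial cross-covariance, returned as an "fv" object
  plot(spatcov(D1, D2))
  # standardised version: spatial cross-correlation
  plot(spatcov(D1, D2, correlation=TRUE))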
} \value{ If \code{isotropic=TRUE} (the default), the result is a function value table (object of class \code{"fv"}) giving the estimated values of the covariance function or spatial correlation function for a sequence of values of the spatial lag distance \code{r}. If \code{isotropic=FALSE}, the result is a pixel image (object of class \code{"im"}) giving the estimated values of the spatial covariance function or spatial correlation function for a grid of values of the spatial lag vector. } \author{ \adrian } \seealso{ \code{\link[spatstat.geom]{imcov}}, \code{\link[spatstat.geom]{setcov}} } \examples{ if(offline <- !interactive()) op <- spatstat.options(npixel=32) D <- density(cells) plot(spatcov(D)) if(offline) spatstat.options(op) } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/sdrPredict.Rd0000644000176200001440000000236414611073325016746 0ustar liggesusers\name{sdrPredict} \alias{sdrPredict} \title{ Compute Predictors from Sufficient Dimension Reduction } \description{ Given the result of a Sufficient Dimension Reduction method, compute the new predictors. } \usage{ sdrPredict(covariates, B) } \arguments{ \item{covariates}{ A list of pixel images (objects of class \code{"im"}). } \item{B}{ Either a matrix of coefficients for the covariates, or the result of a call to \code{\link{sdr}}. } } \details{ This function assumes that \code{\link{sdr}} has already been used to find a minimal set of predictors based on the \code{covariates}. The argument \code{B} should be either the result of \code{\link{sdr}} or the coefficient matrix returned as one of the results of \code{\link{sdr}}. The columns of this matrix define linear combinations of the \code{covariates}. This function evaluates those linear combinations, and returns a list of pixel images containing the new predictors. } \value{ A list of pixel images (objects of class \code{"im"}) with one entry for each column of \code{B}. } \author{ \adrian } \seealso{ \code{\link{sdr}} } \examples{ A <- sdr(bei, bei.extra) Y <- sdrPredict(bei.extra, A) Y } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Iest.Rd0000644000176200001440000001237114643125461015552 0ustar liggesusers\name{Iest} \alias{Iest} \title{Estimate the I-function} \description{ Estimates the summary function \eqn{I(r)} for a multitype point pattern. } \usage{ Iest(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{I(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link[spatstat.geom]{as.ppp}()}. } \item{\dots}{Ignored.} \item{eps}{ the resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{Optional. Numeric vector of values for the argument \eqn{r} at which \eqn{I(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \code{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. Vector of character strings specifying the edge correction(s) to be used by \code{\link{Jest}}. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. 
Essentially a data frame containing \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{I} has been estimated} \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{I(r)} computed from the border-corrected estimates of \eqn{J} functions} \item{km}{the spatial Kaplan-Meier estimator of \eqn{I(r)} computed from the Kaplan-Meier estimates of \eqn{J} functions} \item{han}{the Hanisch-style estimator of \eqn{I(r)} computed from the Hanisch-style estimates of \eqn{J} functions} \item{un}{the uncorrected estimate of \eqn{I(r)} computed from the uncorrected estimates of \eqn{J} } \item{theo}{the theoretical value of \eqn{I(r)} for a stationary Poisson process: identically equal to \eqn{0} } } \note{ Sizeable amounts of memory may be needed during the calculation. } \details{ The \eqn{I} function summarises the dependence between types in a multitype point process (Van Lieshout and Baddeley, 1999). It is based on the concept of the \eqn{J} function for an unmarked point process (Van Lieshout and Baddeley, 1996). See \code{\link{Jest}} for information about the \eqn{J} function. The \eqn{I} function is defined as \deqn{ I(r) = \sum_{i=1}^m p_i J_{ii}(r) - J_{\bullet\bullet}(r) }{ I(r) = (sum p[i] Jii(r)) - J(r) } where \eqn{J_{\bullet\bullet}}{J} is the \eqn{J} function for the entire point process ignoring the marks, while \eqn{J_{ii}}{Jii} is the \eqn{J} function for the process consisting of points of type \eqn{i} only, and \eqn{p_i}{p[i]} is the proportion of points which are of type \eqn{i}. The \eqn{I} function is designed to measure dependence between points of different types, even if the points are not Poisson. Let \eqn{X} be a stationary multitype point process, and write \eqn{X_i}{X[i]} for the process of points of type \eqn{i}. If the processes \eqn{X_i}{X[i]} are independent of each other, then the \eqn{I}-function is identically equal to \eqn{0}. Deviations \eqn{I(r) < 0} or \eqn{I(r) > 0} typically indicate negative and positive association, respectively, between types. See Van Lieshout and Baddeley (1999) for further information. An estimate of \eqn{I} derived from a multitype spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern. The estimate of \eqn{I(r)} is compared against the constant function \eqn{0}. Deviations \eqn{I(r) < 0} or \eqn{I(r) > 0} may suggest negative and positive association, respectively. This algorithm estimates the \eqn{I}-function from the multitype point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial marked point process in the plane, observed through a bounded window. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link[spatstat.geom]{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link[spatstat.geom]{as.ppp}()}. It must be a multitype point pattern (it must have a \code{marks} vector which is a \code{factor}). The function \code{\link{Jest}} is called to compute estimates of the \eqn{J} functions in the formula above. In fact three different estimates are computed using different edge corrections. See \code{\link{Jest}} for information. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. and Baddeley, A.J.
(1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \seealso{ \code{\link{Jest}} } \examples{ Ic <- Iest(amacrine) plot(Ic, main="Amacrine Cells data") # values are below I= 0, suggesting negative association # between 'on' and 'off' cells. } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Kest.fft.Rd0000644000176200001440000000624414643125461016334 0ustar liggesusers\name{Kest.fft} \alias{Kest.fft} \title{K-function using FFT} \description{ Estimates the reduced second moment function \eqn{K(r)} from a point pattern in a window of arbitrary shape, using the Fast Fourier Transform. } \usage{ Kest.fft(X, sigma, r=NULL, \dots, breaks=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{K(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link[spatstat.geom]{as.ppp}()}. } \item{sigma}{ Standard deviation of the isotropic Gaussian smoothing kernel. } \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{K(r)} should be evaluated. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link[spatstat.geom]{as.mask}} determining the spatial resolution for the FFT calculation. } \item{breaks}{ This argument is for internal use only. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{border}{the estimates of \eqn{K(r)} for these values of \eqn{r} } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} for a stationary Poisson process } } \details{ This is an alternative to the function \code{\link{Kest}} for estimating the \eqn{K} function. It may be useful for very large patterns of points. Whereas \code{\link{Kest}} computes the distance between each pair of points analytically, this function discretises the point pattern onto a rectangular pixel raster and applies Fast Fourier Transform techniques to estimate \eqn{K(t)}. The hard work is done by the function \code{\link{Kmeasure}}. The result is an approximation whose accuracy depends on the resolution of the pixel raster. The resolution is controlled by the arguments \code{\dots}, or by setting the parameter \code{npixel} in \code{\link[spatstat.geom]{spatstat.options}}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. 
} \seealso{ \code{\link{Kest}}, \code{\link{Kmeasure}}, \code{\link[spatstat.geom]{spatstat.options}} } \examples{ pp <- runifpoint(10000) \testonly{ op <- spatstat.options(npixel=125) } Kpp <- Kest.fft(pp, 0.01) plot(Kpp) \testonly{spatstat.options(op)} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/alltypes.Rd0000644000176200001440000002310414611073323016471 0ustar liggesusers\name{alltypes} \alias{alltypes} \title{Calculate Summary Statistic for All Types in a Multitype Point Pattern} \description{ Given a marked point pattern, this computes the estimates of a selected summary function (\eqn{F},\eqn{G}, \eqn{J}, \eqn{K} etc) of the pattern, for all possible combinations of marks, and returns these functions in an array. } \usage{ alltypes(X, fun="K", \dots, dataname=NULL,verb=FALSE,envelope=FALSE,reuse=TRUE) } \arguments{ \item{X}{The observed point pattern, for which summary function estimates are required. An object of class \code{"ppp"} or \code{"lpp"}. } \item{fun}{The summary function. Either an \R function, or a character string indicating the summary function required. Options for strings are \code{"F"}, \code{"G"}, \code{"J"}, \code{"K"}, \code{"L"}, \code{"pcf"}, \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"}, \code{"Lcross"}, \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"}, \code{"Ldot"}. } \item{\dots}{ Arguments passed to the summary function (and to the function \code{\link{envelope}} if appropriate) } \item{dataname}{Character string giving an optional (alternative) name to the point pattern, different from what is given in the call. This name, if supplied, may be used by \code{\link{plot.fasp}()} in forming the title of the plot. If not supplied it defaults to the parsing of the argument supplied as \code{X} in the call. } \item{verb}{ Logical value. If \code{verb} is true then terse ``progress reports'' (just the values of the mark indices) are printed out when the calculations for that combination of marks are completed. } \item{envelope}{ Logical value. If \code{envelope} is true, then simulation envelopes of the summary function will also be computed. See Details. } \item{reuse}{ Logical value indicating whether the envelopes in each panel should be based on the same set of simulated patterns (\code{reuse=TRUE}) or on different, independent sets of simulated patterns (\code{reuse=FALSE}). } } \details{ This routine is a convenient way to analyse the dependence between types in a multitype point pattern. It computes the estimates of a selected summary function of the pattern, for all possible combinations of marks. It returns these functions in an array (an object of class \code{"fasp"}) amenable to plotting by \code{\link{plot.fasp}()}. The argument \code{fun} specifies the summary function that will be evaluated for each type of point, or for each pair of types. It may be either an \R function or a character string. Suppose that the points have possible types \eqn{1,2,\ldots,m} and let \eqn{X_i}{X[i]} denote the pattern of points of type \eqn{i} only. If \code{fun="F"} then this routine calculates, for each possible type \eqn{i}, an estimate of the Empty Space Function \eqn{F_i(r)}{F[i](r)} of \eqn{X_i}{X[i]}. See \code{\link{Fest}} for explanation of the empty space function. The estimate is computed by applying \code{\link{Fest}} to \eqn{X_i}{X[i]} with the optional arguments \code{\dots}. 
If \code{fun} is \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"} or \code{"Lcross"}, the routine calculates, for each pair of types \eqn{(i,j)}, an estimate of the ``\code{i}-to\code{j}'' cross-type function \eqn{G_{ij}(r)}{G[i,j](r)}, \eqn{J_{ij}(r)}{J[i,j](r)}, \eqn{K_{ij}(r)}{K[i,j](r)} or \eqn{L_{ij}(r)}{L[i,j](r)} respectively describing the dependence between \eqn{X_i}{X[i]} and \eqn{X_j}{X[j]}. See \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}} or \code{\link{Lcross}} respectively for explanation of these functions. The estimate is computed by applying the relevant function (\code{\link{Gcross}} etc) to \code{X} using each possible value of the arguments \code{i,j}, together with the optional arguments \code{\dots}. If \code{fun} is \code{"pcf"} the routine calculates the cross-type pair correlation function \code{\link{pcfcross}} between each pair of types. If \code{fun} is \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"} or \code{"Ldot"}, the routine calculates, for each type \eqn{i}, an estimate of the ``\code{i}-to-any'' dot-type function \eqn{G_{i\bullet}(r)}{G[i.](r)}, \eqn{J_{i\bullet}(r)}{J[i.](r)} or \eqn{K_{i\bullet}(r)}{K[i.](r)} or \eqn{L_{i\bullet}(r)}{L[i.](r)} respectively describing the dependence between \eqn{X_i}{X[i]} and \eqn{X}{X}. See \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}} or \code{\link{Ldot}} respectively for explanation of these functions. The estimate is computed by applying the relevant function (\code{\link{Gdot}} etc) to \code{X} using each possible value of the argument \code{i}, together with the optional arguments \code{\dots}. The letters \code{"G"}, \code{"J"}, \code{"K"} and \code{"L"} are interpreted as abbreviations for \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}} and \code{\link{Lcross}} respectively, assuming the point pattern is marked. If the point pattern is unmarked, the appropriate function \code{\link{Fest}}, \code{\link{Jest}}, \code{\link{Kest}} or \code{\link{Lest}} is invoked instead. If \code{envelope=TRUE}, then as well as computing the value of the summary function for each combination of types, the algorithm also computes simulation envelopes of the summary function for each combination of types. The arguments \code{\dots} are passed to the function \code{\link{envelope}} to control the number of simulations, the random process generating the simulations, the construction of envelopes, and so on. When \code{envelope=TRUE} it is possible that errors could occur because the simulated point patterns do not satisfy the requirements of the summary function (for example, because the simulated pattern is empty and \code{fun} requires at least one point). If the number of such errors exceeds the maximum permitted number \code{maxnerr}, then the envelope algorithm will give up, and will return the empirical summary function for the data point pattern, \code{fun(X)}, in place of the envelope. } \value{ A function array (an object of class \code{"fasp"}, see \code{\link{fasp.object}}). This can be plotted using \code{\link{plot.fasp}}. If the pattern is not marked, the resulting ``array'' has dimensions \eqn{1 \times 1}{1 x 1}. Otherwise the following is true: If \code{fun="F"}, the function array has dimensions \eqn{m \times 1}{m * 1} where \eqn{m} is the number of different marks in the point pattern. The entry at position \code{[i,1]} in this array is the result of applying \code{\link{Fest}} to the points of type \code{i} only. 
If \code{fun} is \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"} or \code{"Ldot"}, the function array again has dimensions \eqn{m \times 1}{m * 1}. The entry at position \code{[i,1]} in this array is the result of \code{Gdot(X, i)}, \code{Jdot(X, i)} \code{Kdot(X, i)} or \code{Ldot(X, i)} respectively. If \code{fun} is \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"} or \code{"Lcross"} (or their abbreviations \code{"G"}, \code{"J"}, \code{"K"} or \code{"L"}), the function array has dimensions \eqn{m \times m}{m * m}. The \code{[i,j]} entry of the function array (for \eqn{i \neq j}{i != j}) is the result of applying the function \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}} or\code{\link{Lcross}} to the pair of types \code{(i,j)}. The diagonal \code{[i,i]} entry of the function array is the result of applying the univariate function \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}} or \code{\link{Lest}} to the points of type \code{i} only. If \code{envelope=FALSE}, then each function entry \code{fns[[i]]} retains the format of the output of the relevant estimating routine \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{Gcross}}, \code{\link{Jcross}} ,\code{\link{Kcross}}, \code{\link{Lcross}}, \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}} or \code{\link{Ldot}} The default formulae for plotting these functions are \code{cbind(km,theo) ~ r} for F, G, and J functions, and \code{cbind(trans,theo) ~ r} for K and L functions. If \code{envelope=TRUE}, then each function entry \code{fns[[i]]} has the same format as the output of the \code{\link{envelope}} command. } \note{ Sizeable amounts of memory may be needed during the calculation. } \seealso{ \code{\link{plot.fasp}}, \code{\link{fasp.object}}, \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}}, \code{\link{Lcross}}, \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}}, \code{\link{envelope}}. } \examples{ # bramblecanes (3 marks). bram <- bramblecanes \testonly{ bram <- bram[c(seq(1, 744, by=20), seq(745, 823, by=4))] } bF <- alltypes(bram,"F",verb=TRUE) plot(bF) if(interactive()) { plot(alltypes(bram,"G")) plot(alltypes(bram,"Gdot")) } # Swedishpines (unmarked). swed <- swedishpines \testonly{ swed <- swed[1:25] } plot(alltypes(swed,"K")) plot(alltypes(amacrine, "pcf"), ylim=c(0,1.3)) # envelopes bKE <- alltypes(bram,"K",envelope=TRUE,nsim=19) # global version: \donttest{ bFE <- alltypes(bram,"F",envelope=TRUE,nsim=19,global=TRUE) } # extract one entry as.fv(bKE[1,1]) } \author{\adrian and \rolf. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/pcfcross.Rd0000644000176200001440000001506214611073324016463 0ustar liggesusers\name{pcfcross} \alias{pcfcross} \title{Multitype pair correlation function (cross-type)} \description{ Calculates an estimate of the cross-type pair correlation function for a multitype point pattern. } \usage{ pcfcross(X, i, j, \dots, r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("isotropic", "Ripley", "translate"), divisor = c("r", "d"), ratio = FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). 
} \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{\dots}{ Ignored. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{stoyan}{ Coefficient for default bandwidth rule; see Details. } \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See Details. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \details{ The cross-type pair correlation function is a generalisation of the pair correlation function \code{\link{pcf}} to multitype point patterns. For two locations \eqn{x} and \eqn{y} separated by a distance \eqn{r}, the probability \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x} and a point of type \eqn{j} at location \eqn{y} is \deqn{ p(r) = \lambda_i \lambda_j g_{i,j}(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i] * lambda[j] * g[i,j](r) dx dy } where \eqn{\lambda_i}{lambda[i]} is the intensity of the points of type \eqn{i}. For a completely random Poisson marked point process, \eqn{p(r) = \lambda_i \lambda_j}{p(r) = lambda[i] * lambda[j]} so \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1}. Indeed for any marked point pattern in which the points of type \code{i} are independent of the points of type \code{j}, the theoretical value of the cross-type pair correlation is \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1}. For a stationary multitype point process, the cross-type pair correlation function between marks \eqn{i} and \eqn{j} is formally defined as \deqn{ g_{i,j}(r) = \frac{K_{i,j}^\prime(r)}{2\pi r} }{ g(r) = K[i,j]'(r)/ ( 2 * pi * r) } where \eqn{K_{i,j}^\prime}{K[i,j]'(r)} is the derivative of the cross-type \eqn{K} function \eqn{K_{i,j}(r)}{K[i,j](r)}. of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. The command \code{pcfcross} computes a kernel estimate of the cross-type pair correlation function between marks \eqn{i} and \eqn{j}. \itemize{ \item If \code{divisor="r"} (the default), then the multitype counterpart of the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. \item If \code{divisor="d"} then a modified estimator is used: the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}. This usually improves the bias of the estimator when \eqn{r} is close to zero. } There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \code{correction="translate"} is the Ohser-Stoyan translation correction, and \code{correction="isotropic"} or \code{"Ripley"} is Ripley's isotropic correction. 
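For example, the following sketch illustrates these options using the \code{amacrine} data as in the examples below (the particular choices of \code{divisor} and \code{correction} are arbitrary):

  # divide by the interpoint distance d rather than by r
  p1 <- pcfcross(amacrine, "off", "on", divisor="d")
  # use only the translation edge correction
  p2 <- pcfcross(amacrine, "off", "on", correction="translate")
  plot(p1)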
The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density}}. The default is the Epanechnikov kernel. The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Its precise interpretation is explained in the documentation for \code{\link{density.default}}. For the Epanechnikov kernel with support \eqn{[-h,h]}, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. If \code{bw} is not specified, the default bandwidth is determined by Stoyan's rule of thumb (Stoyan and Stoyan, 1994, page 285) applied to the points of type \code{j}. That is, \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the point process of type \code{j}, and \eqn{c} is a constant in the range from 0.1 to 0.2. The argument \code{stoyan} determines the value of \eqn{c}. The companion function \code{\link{pcfdot}} computes the corresponding analogue of \code{\link{Kdot}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{g_{i,j}}{g[i,j]} has been estimated } \item{theo}{the theoretical value \eqn{g_{i,j}(r) = 1}{g[i,j](r) = r} for independent marks. } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{g_{i,j}}{g[i,j]} obtained by the edge corrections named. } \seealso{ Mark connection function \code{\link{markconnect}}. Multitype pair correlation \code{\link{pcfdot}}, \code{\link{pcfmulti}}. Pair correlation \code{\link{pcf}},\code{\link{pcf.ppp}}. \code{\link{Kcross}} } \examples{ p <- pcfcross(amacrine, "off", "on") p <- pcfcross(amacrine, "off", "on", stoyan=0.1) plot(p) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/bits.envelope.Rd0000644000176200001440000001134514611073323017415 0ustar liggesusers\name{bits.envelope} \alias{bits.envelope} \title{ Global Envelopes for Balanced Independent Two-Stage Test } \description{ Computes the global envelopes corresponding to the balanced independent two-stage Monte Carlo test of goodness-of-fit. } \usage{ bits.envelope(X, \dots, nsim = 19, nrank = 1, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose = TRUE) } \arguments{ \item{X}{ Either a point pattern dataset (object of class \code{"ppp"}, \code{"lpp"} or \code{"pp3"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"slrm"}). } \item{\dots}{ Arguments passed to \code{\link{mad.test}} or \code{\link{envelope}} to control the conduct of the test. Useful arguments include \code{fun} to determine the summary function, \code{rinterval} to determine the range of \eqn{r} values used in the test, and \code{verbose=FALSE} to turn off the messages. } \item{nsim}{ Number of simulated patterns to be generated in each stage. Number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsim} simulated realisations, together with one independent set of \code{nsim} realisations, so there will be a total of \code{nsim * (nsim + 1)} simulations. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. 
A rank of 1 means that the minimum and maximum simulated values will be used. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{alternative="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{alternative="less"}) or a one-sided test with an upper critical boundary (\code{alternative="greater"}). } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value determining whether to print progress reports. } } \details{ Computes global simulation envelopes corresponding to the balanced independent two-stage Monte Carlo test of goodness-of-fit described by Baddeley et al (2017). The envelopes are described in Baddeley et al (2019). If \code{X} is a point pattern, the null hypothesis is CSR. If \code{X} is a fitted model, the null hypothesis is that model. This command is similar to \code{\link{dg.envelope}} which corresponds to the Dao-Genton test of goodness-of-fit. It was shown in Baddeley et al (2017) that the Dao-Genton test is biased when the significance level is very small (small \eqn{p}-values are not reliable) and we recommend \code{\link{bits.envelope}} in this case. } \value{ An object of class \code{"fv"}. } \references{ Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. Baddeley, A., Hardegen, A., Lawrence, T., Milne, R.K., Nair, G. and Rakshit, S. (2017) On two-stage Monte Carlo tests of composite hypotheses. \emph{Computational Statistics and Data Analysis} \bold{114}, {75--87}. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2019) Pushing the envelope: extensions of graphical Monte Carlo tests. In preparation. } \author{ Adrian Baddeley, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \adrian. } \seealso{ \code{\link{dg.envelope}}, \code{\link{bits.test}}, \code{\link{mad.test}}, \code{\link{envelope}} } \examples{ ns <- if(interactive()) 19 else 4 E <- bits.envelope(swedishpines, Lest, nsim=ns) E plot(E) Eo <- bits.envelope(swedishpines, Lest, alternative="less", nsim=ns) Ei <- bits.envelope(swedishpines, Lest, interpolate=TRUE, nsim=ns) } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} \concept{Goodness-of-fit} \concept{Test of randomness} \concept{Envelope of simulations} spatstat.explore/man/bw.smoothppp.Rd0000644000176200001440000001036114611073323017275 0ustar liggesusers\name{bw.smoothppp} \alias{bw.smoothppp} \title{ Cross Validated Bandwidth Selection for Spatial Smoothing } \description{ Uses least-squares cross-validation to select a smoothing bandwidth for spatial smoothing of marks. 
} \usage{ bw.smoothppp(X, nh = spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE, kernel="gaussian", varcov1=NULL) } \arguments{ \item{X}{ A marked point pattern with numeric marks. } \item{nh}{ Number of trial values of smoothing bandwith \code{sigma} to consider. The default is 32. } \item{hmin, hmax}{ Optional. Numeric values. Range of trial values of smoothing bandwith \code{sigma} to consider. There is a sensible default. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the minimum of the cross-validation criterion occurs at one of the ends of the search interval. } \item{kernel}{ The smoothing kernel. A character string specifying the smoothing kernel (current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}). } \item{varcov1}{ Optional. Variance-covariance matrix matrix of the kernel with bandwidth \eqn{h=1}. See section on Anisotropic Smoothing. } } \details{ This function selects an appropriate bandwidth for the nonparametric smoothing of mark values using \code{\link{Smooth.ppp}}. The argument \code{X} must be a marked point pattern with a vector or data frame of marks. All mark values must be numeric. The bandwidth is selected by least-squares cross-validation. Let \eqn{y_i}{y[i]} be the mark value at the \eqn{i}th data point. For a particular choice of smoothing bandwidth, let \eqn{\hat y_i}{y*[i]} be the smoothed value at the \eqn{i}th data point. Then the bandwidth is chosen to minimise the squared error of the smoothed values \eqn{\sum_i (y_i - \hat y_i)^2}{sum (y[i] - y*[i])^2}. The result of \code{bw.smoothppp} is a numerical value giving the selected bandwidth \code{sigma}. The result also belongs to the class \code{"bw.optim"} allowing it to be printed and plotted. The plot shows the cross-validation criterion as a function of bandwidth. The range of values for the smoothing bandwidth \code{sigma} is set by the arguments \code{hmin, hmax}. There is a sensible default, based on the nearest neighbour distances. If the optimal bandwidth is achieved at an endpoint of the interval \code{[hmin, hmax]}, the algorithm will issue a warning (unless \code{warn=FALSE}). If this occurs, then it is probably advisable to expand the interval by changing the arguments \code{hmin, hmax}. Computation time depends on the number \code{nh} of trial values considered, and also on the range \code{[hmin, hmax]} of values considered, because larger values of \code{sigma} require calculations involving more pairs of data points. } \section{Anisotropic Smoothing}{ Anisotropic smoothing is available in \code{\link{Smooth.ppp}} using the argument \code{varcov} to specify the variance-covariance matrix of the anisotropic kernel. In order to choose the matrix \code{varcov}, the user can call \code{bw.smoothppp} using the argument \code{varcov1} to specify a \sQuote{template} matrix. Scalar multiples of \code{varcov1} will be considered and the optimal scale factor will be determined. That is, \code{bw.smoothppp} will try smoothing the data using \code{varcov = h^2 * varcov1} for different values of \code{h} ranging from \code{hmin} to \code{hmax}. The result of \code{bw.smoothppp} will be the optimal value of the standard deviation scale factor \code{h}. } \value{ A single numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} (see \code{\link[spatstat.explore]{bw.optim.object}}) which can be plotted to show the bandwidth selection criterion as a function of \code{sigma}. 
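For illustration, the cross-validation criterion for a single trial bandwidth can be computed by hand along the following lines (this is only a sketch of the idea, not the internal implementation, and the trial value \code{sigma0} is an arbitrary choice):

  sigma0 <- 0.1 * diameter(Window(longleaf))
  # leave-one-out smoothed mark values at the data points
  yhat <- Smooth(longleaf, sigma=sigma0, at="points", leaveoneout=TRUE)
  # least-squares cross-validation criterion for this bandwidth
  sum((marks(longleaf) - yhat)^2)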
} \seealso{ \code{\link{Smooth.ppp}}, \code{\link[spatstat.explore]{bw.optim.object}} } \examples{ \testonly{op <- spatstat.options(n.bandwidth=8)} b <- bw.smoothppp(longleaf) b plot(b) \testonly{spatstat.options(op)} } \author{\adrian and \rolf } \keyword{spatial} \keyword{methods} \keyword{smooth} \concept{Bandwidth selection} spatstat.explore/man/eval.fv.Rd0000644000176200001440000001207414611073324016202 0ustar liggesusers\name{eval.fv} \alias{eval.fv} \title{Evaluate Expression Involving Functions} \description{ Evaluates any expression involving one or more function value (fv) objects, and returns another object of the same kind. } \usage{ eval.fv(expr, envir, dotonly=TRUE, equiv=NULL, relabel=TRUE) } \arguments{ \item{expr}{An expression.} \item{envir}{ Optional. The environment in which to evaluate the expression, or a named list containing \code{"fv"} objects to be used in the expression. } \item{dotonly}{Logical. See Details.} \item{equiv}{Mapping between column names of different objects that are deemed to be equivalent. See Details.} \item{relabel}{ Logical value indicating whether to compute appropriate labels for the resulting function. This should normally be \code{TRUE} (the default). See Details. } } \details{ This is a wrapper to make it easier to perform pointwise calculations with the summary functions used in spatial statistics. An object of class \code{"fv"} is essentially a data frame containing several different statistical estimates of the same function. Such objects are returned by \code{\link[spatstat.explore]{Kest}} and its relatives. For example, suppose \code{X} is an object of class \code{"fv"} containing several different estimates of the Ripley's K function \eqn{K(r)}, evaluated at a sequence of values of \eqn{r}. Then \code{eval.fv(X+3)} effectively adds 3 to each function estimate in \code{X}, and returns the resulting object. Suppose \code{X} and \code{Y} are two objects of class \code{"fv"} which are compatible (in particular they have the same vector of \eqn{r} values). Then \code{eval.im(X + Y)} will add the corresponding function values in \code{X} and \code{Y}, and return the resulting function. In general, \code{expr} can be any expression involving (a) the \emph{names} of objects of class \code{"fv"}, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.fv} determines which of the \emph{variable names} in the expression \code{expr} refer to objects of class \code{"fv"}. Each such name is replaced by a vector containing the function values. The expression is then evaluated. The result should be a vector; it is taken as the new vector of function values. The expression \code{expr} must be vectorised. There must be at least one object of class \code{"fv"} in the expression. If the objects are not compatible, they will be made compatible by \code{\link{harmonise.fv}}. If \code{dotonly=TRUE} (the default), the expression will be evaluated only for those columns of an \code{"fv"} object that contain values of the function itself (rather than values of the derivative of the function, the hazard rate, etc). If \code{dotonly=FALSE}, the expression will be evaluated for all columns. For example the result of \code{\link[spatstat.explore]{Fest}} includes several columns containing estimates of the empty space function \eqn{F(r)}, but also includes an estimate of the \emph{hazard} \eqn{h(r)} of \eqn{F(r)}. Transformations that are valid for \eqn{F} may not be valid for \eqn{h}. 
Accordingly, \eqn{h} would normally be omitted from the calculation. The columns of an object \code{x} that represent the function itself are identified by its \dQuote{dot} names, \code{fvnames(x, ".")}. They are the columns normally plotted by \code{\link{plot.fv}} and identified by the symbol \code{"."} in plot formulas in \code{\link{plot.fv}}. The argument \code{equiv} can be used to specify that two different column names in different function objects are mathematically equivalent or cognate. It should be a list of \code{name=value} pairs, or a named vector of character strings, indicating the pairing of equivalent names. (Without this argument, these columns would be discarded.) See the Examples. The argument \code{relabel} should normally be \code{TRUE} (the default). It determines whether to compute appropriate mathematical labels and descriptions for the resulting function object (used when the object is printed or plotted). If \code{relabel=FALSE} then this does not occur, and the mathematical labels and descriptions in the result are taken from the function object that appears first in the expression. This reduces computation time slightly (for advanced use only). } \value{ Another object of class \code{"fv"}. } \seealso{ \code{\link{fv.object}}, \code{\link[spatstat.explore]{Kest}} } \examples{ # manipulating the K function X <- runifrect(42) Ks <- Kest(X) eval.fv(Ks + 3) Ls <- eval.fv(sqrt(Ks/pi)) # manipulating two K functions Y <- runifrect(20) Kr <- Kest(Y) Kdif <- eval.fv(Ks - Kr) Z <- eval.fv(sqrt(Ks/pi) - sqrt(Kr/pi)) ## Use of 'envir' U <- eval.fv(sqrt(K), list(K=Ks)) ## Use of 'equiv' Fc <- Fest(cells) Gc <- Gest(cells) # Hanisch and Chiu-Stoyan estimators are cognate Dc <- eval.fv(Fc - Gc, equiv=list(cs="han")) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat.explore/man/relrisk.ppp.Rd0000644000176200001440000003076214643125462017124 0ustar liggesusers\name{relrisk.ppp} \alias{relrisk.ppp} \title{ Nonparametric Estimate of Spatially-Varying Relative Risk } \description{ Given a multitype point pattern, this function estimates the spatially-varying probability of each type of point, or the ratios of such probabilities, using kernel smoothing. The default smoothing bandwidth is selected by cross-validation. } \usage{ \method{relrisk}{ppp}(X, sigma = NULL, ..., at = c("pixels", "points"), weights = NULL, varcov = NULL, relative=FALSE, adjust=1, edge=TRUE, diggle=FALSE, se=FALSE, wtype=c("value", "multiplicity"), casecontrol=TRUE, control=1, case, fudge=0) } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} which has factor valued marks). } \item{sigma}{ Optional. The numeric value of the smoothing bandwidth (the standard deviation of isotropic Gaussian smoothing kernel). Alternatively \code{sigma} may be a function which can be used to select a different bandwidth for each type of point. See Details. } \item{\dots}{ Arguments passed to \code{\link{bw.relrisk}} to select the bandwidth, or passed to \code{\link{density.ppp}} to control the pixel resolution. } \item{at}{ Character string specifying whether to compute the probability values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{weights}{ Optional. Weights for the data points of \code{X}. A numeric vector, an \code{expression}, or a pixel image. } \item{varcov}{ Optional. Variance-covariance matrix of anisotopic Gaussian smoothing kernel. Incompatible with \code{sigma}. 
} \item{relative}{ Logical. If \code{FALSE} (the default) the algorithm computes the probabilities of each type of point. If \code{TRUE}, it computes the \emph{relative risk}, the ratio of probabilities of each type relative to the probability of a control. } \item{adjust}{ Optional. Adjustment factor for the bandwidth \code{sigma}. } \item{edge}{ Logical value indicating whether to apply edge correction. } \item{diggle}{ Logical. If \code{TRUE}, use the Jones-Diggle improved edge correction, which is more accurate but slower to compute than the default correction. } \item{se}{ Logical value indicating whether to compute standard errors as well. } \item{wtype}{ Character string (partially matched) specifying how the weights should be interpreted for the calculation of standard error. See Details. } \item{casecontrol}{ Logical. Whether to treat a bivariate point pattern as consisting of cases and controls, and return only the probability or relative risk of a case. Ignored if there are more than 2 types of points. See Details. } \item{control}{ Integer, or character string, identifying which mark value corresponds to a control. } \item{case}{ Integer, or character string, identifying which mark value corresponds to a case (rather than a control) in a bivariate point pattern. This is an alternative to the argument \code{control} in a bivariate point pattern. Ignored if there are more than 2 types of points. } \item{fudge}{ Optional. A single numeric value, or a numeric vector with one entry for each type of point. This value will be added to the estimates of point process intensity, before calculation of the relative risk. } } \details{ The command \code{\link{relrisk}} is generic and can be used to estimate relative risk in different ways. This function \code{relrisk.ppp} is the method for point pattern datasets. It computes \emph{nonparametric} estimates of relative risk by kernel smoothing (Bithell, 1990, 1991; Diggle, 2003; Baddeley, Rubak and Turner, 2015). If \code{X} is a bivariate point pattern (a multitype point pattern consisting of two types of points) then by default, the points of the first type (the first level of \code{marks(X)}) are treated as controls or non-events, and points of the second type are treated as cases or events. Then by default this command computes the spatially-varying \emph{probability} of a case, i.e. the probability \eqn{p(u)} that a point at spatial location \eqn{u} will be a case. If \code{relative=TRUE}, it computes the spatially-varying \emph{relative risk} of a case relative to a control, \eqn{r(u) = p(u)/(1- p(u))}. If \code{X} is a multitype point pattern with \eqn{m > 2} types, or if \code{X} is a bivariate point pattern and \code{casecontrol=FALSE}, then by default this command computes, for each type \eqn{j}, a nonparametric estimate of the spatially-varying \emph{probability} of an event of type \eqn{j}. This is the probability \eqn{p_j(u)}{p[j](u)} that a point at spatial location \eqn{u} will belong to type \eqn{j}. If \code{relative=TRUE}, the command computes the \emph{relative risk} of an event of type \eqn{j} relative to a control, \eqn{r_j(u) = p_j(u)/p_k(u)}{r[j](u) = p[j](u)/p[k](u)}, where events of type \eqn{k} are treated as controls. The argument \code{control} determines which type \eqn{k} is treated as a control. 
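For example (an illustrative sketch; the bandwidth of 20 metres matches the example below, and the choice of control type is arbitrary):

  # probability of the case type (here oak), as a pixel image
  prob <- relrisk(urkiola, 20)
  # relative risk of oak relative to birch controls
  risk <- relrisk(urkiola, 20, relative=TRUE, control="birch")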
If \code{at = "pixels"} the calculation is performed for every spatial location \eqn{u} on a fine pixel grid, and the result is a pixel image representing the function \eqn{p(u)} or a list of pixel images representing the functions \eqn{p_j(u)}{p[j](u)} or \eqn{r_j(u)}{r[j](u)} for \eqn{j = 1,\ldots,m}{j = 1,...,m}. An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{NA}. If \code{at = "points"} the calculation is performed only at the data points \eqn{x_i}{x[i]}. By default the result is a vector of values \eqn{p(x_i)}{p(x[i])} giving the estimated probability of a case at each data point, or a matrix of values \eqn{p_j(x_i)}{p[j](x[i])} giving the estimated probability of each possible type \eqn{j} at each data point. If \code{relative=TRUE} then the relative risks \eqn{r(x_i)}{r(x[i])} or \eqn{r_j(x_i)}{r[j](x[i])} are returned. An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{Inf}. Estimation is performed by a simple Nadaraja-Watson type kernel smoother (Bithell, 1990, 1991; Diggle, 2003; Baddeley, Rubak and Turner, 2015, section 14.4). The smoothing bandwidth can be specified in any of the following ways: \itemize{ \item \code{sigma} is a single numeric value, giving the standard deviation of the isotropic Gaussian kernel. \item \code{sigma} is a numeric vector of length 2, giving the standard deviations in the \eqn{x} and \eqn{y} directions of a Gaussian kernel. \item \code{varcov} is a 2 by 2 matrix giving the variance-covariance matrix of the Gaussian kernel. \item \code{sigma} is a \code{function} which selects the bandwidth. Bandwidth selection will be applied \bold{separately to each type of point}. An example of such a function is \code{\link{bw.diggle}}. \item \code{sigma} and \code{varcov} are both missing or null. Then a \bold{common} smoothing bandwidth \code{sigma} will be selected by cross-validation using \code{\link{bw.relrisk}}. \item An infinite smoothing bandwidth, \code{sigma=Inf}, is permitted and yields a constant estimate of relative risk. } If \code{se=TRUE} then standard errors will also be computed, based on asymptotic theory, \emph{assuming a Poisson process}. The optional argument \code{weights} may provide numerical weights for the points of \code{X}. It should be a numeric vector of length equal to \code{npoints(X)}. The argument \code{weights} can also be an \code{expression}. It will be evaluated in the data frame \code{as.data.frame(X)} to obtain a vector of weights. The expression may involve the symbols \code{x} and \code{y} representing the Cartesian coordinates, and the symbol \code{marks} representing the mark values. The argument \code{weights} can also be a pixel image (object of class \code{"im"}). numerical weights for the data points will be extracted from this image (by looking up the pixel values at the locations of the data points in \code{X}). } \value{ If \code{se=FALSE} (the default), the format is described below. If \code{se=TRUE}, the result is a list of two entries, \code{estimate} and \code{SE}, each having the format described below. If \code{X} consists of only two types of points, and if \code{casecontrol=TRUE}, the result is a pixel image (if \code{at="pixels"}) or a vector (if \code{at="points"}). 
The pixel values or vector values are the probabilities of a case if \code{relative=FALSE}, or the relative risk of a case (probability of a case divided by the probability of a control) if \code{relative=TRUE}. If \code{X} consists of more than two types of points, or if \code{casecontrol=FALSE}, the result is: \itemize{ \item (if \code{at="pixels"}) a list of pixel images, with one image for each possible type of point. The result also belongs to the class \code{"solist"} so that it can be printed and plotted. \item (if \code{at="points"}) a matrix of probabilities, with rows corresponding to data points \eqn{x_i}{x[i]}, and columns corresponding to types \eqn{j}. } The pixel values or matrix entries are the probabilities of each type of point if \code{relative=FALSE}, or the relative risk of each type (probability of each type divided by the probability of a control) if \code{relative=TRUE}. If \code{relative=FALSE}, the resulting values always lie between 0 and 1. If \code{relative=TRUE}, the results are either non-negative numbers, or the values \code{Inf} or \code{NA}. } \section{Standard error}{ If \code{se=TRUE}, the standard error of the estimate will also be calculated. The calculation assumes a Poisson point process. If \code{weights} are given, then the calculation of standard error depends on the interpretation of the weights. This is controlled by the argument \code{wtype}. \itemize{ \item If \code{wtype="value"} (the default), the weights are interpreted as numerical values observed at the data locations. Roughly speaking, standard errors are proportional to the absolute values of the weights. \item If \code{wtype="multiplicity"} the weights are interpreted as multiplicities so that a weight of 2 is equivalent to having a pair of duplicated points at the data location. Roughly speaking, standard errors are proportional to the square roots of the weights. Negative weights are not permitted. } The default rule is now \code{wtype="value"} but previous versions of \code{relrisk.ppp} (in \pkg{spatstat.explore} versions \code{3.1-0} and earlier) effectively used \code{wtype="multiplicity"}. } \seealso{ There is another method \code{\link[spatstat.model]{relrisk.ppm}} for point process models which computes \emph{parametric} estimates of relative risk, using the fitted model. See also \code{\link{bw.relrisk}}, \code{\link{density.ppp}}, \code{\link{Smooth.ppp}}, \code{\link[spatstat.geom]{eval.im}} } \examples{ p.oak <- relrisk(urkiola, 20) if(interactive()) { plot(p.oak, main="proportion of oak") plot(eval.im(p.oak > 0.3), main="More than 30 percent oak") plot(split(lansing), main="Lansing Woods") p.lan <- relrisk(lansing, 0.05, se=TRUE) plot(p.lan$estimate, main="Lansing Woods species probability") plot(p.lan$SE, main="Lansing Woods standard error") wh <- im.apply(p.lan$estimate, which.max) types <- levels(marks(lansing)) wh <- eval.im(types[wh]) plot(wh, main="Most common species") } } \references{ \baddrubaturnbook Bithell, J.F. (1990) An application of density estimation to geographical epidemiology. \emph{Statistics in Medicine} \bold{9}, 691--701. Bithell, J.F. (1991) Estimation of relative risk functions. \emph{Statistics in Medicine} \bold{10}, 1745--1751. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Diggle, P.J., Zheng, P. and Durr, P. (2005) Non-parametric estimation of spatial segregation in a multivariate point process: bovine tuberculosis in Cornwall, UK. \emph{Applied Statistics} \bold{54}, 645--658. 
} \author{ \adrian and \rolf. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.explore/man/FmultiInhom.Rd0000644000176200001440000000551514643125461017103 0ustar liggesusers\name{FmultiInhom} \alias{FmultiInhom} \alias{Fmulti.inhom} \title{ Inhomogeneous Marked F-Function } \description{ For a marked point pattern, estimate the inhomogeneous version of the multitype \eqn{F} function, effectively the cumulative distribution function of the distance from a fixed point to the nearest point in subset \eqn{J}, adjusted for spatially varying intensity. } \usage{ Fmulti.inhom(X, J, lambda = NULL, lambdaJ = NULL, lambdamin = NULL, \dots, r = NULL) FmultiInhom(X, J, lambda = NULL, lambdaJ = NULL, lambdamin = NULL, \dots, r = NULL) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}. } \item{J}{ A subset index specifying the subset of points to which distances are measured. Any kind of subset index acceptable to \code{\link[spatstat.geom]{[.ppp}}. } \item{lambda}{ Intensity estimates for each point of \code{X}. A numeric vector of length equal to \code{npoints(X)}. Incompatible with \code{lambdaJ}. } \item{lambdaJ}{ Intensity estimates for each point of \code{X[J]}. A numeric vector of length equal to \code{npoints(X[J])}. Incompatible with \code{lambda}. } \item{lambdamin}{ A lower bound for the intensity, or at least a lower bound for the values in \code{lambdaJ} or \code{lambda[J]}. } \item{\dots}{ Extra arguments passed to \code{\link[spatstat.geom]{as.mask}} to control the pixel resolution for the computation. } \item{r}{ Vector of distance values at which the inhomogeneous \eqn{G} function should be estimated. There is a sensible default. } } \details{ See Cronie and Van Lieshout (2015). The functions \code{FmultiInhom} and \code{Fmulti.inhom} are identical. } \value{ Object of class \code{"fv"} containing the estimate of the inhomogeneous multitype \eqn{F} function. } \references{ Cronie, O. and Van Lieshout, M.N.M. (2015) Summary statistics for inhomogeneous marked point processes. \emph{Annals of the Institute of Statistical Mathematics} DOI: 10.1007/s10463-015-0515-z } \author{ Ottmar Cronie and Marie-Colette van Lieshout. Rewritten for \pkg{spatstat} by \adrian. } \seealso{ \code{\link{Finhom}} } \examples{ X <- amacrine J <- (marks(X) == "off") online <- interactive() eps <- if(online) NULL else 0.025 if(online && require(spatstat.model)) { mod <- ppm(X ~ marks * x, eps=eps) lambdaX <- fitted(mod, dataonly=TRUE) lambdaOff <- predict(mod, eps=eps)[["off"]] lmin <- min(lambdaOff) * 0.9 } else { ## faster computation for package checker only lambdaX <- intensity(X)[as.integer(marks(X))] lmin <- intensity(X)[2] * 0.9 } plot(FmultiInhom(X, J, lambda=lambdaX, lambdamin=lmin, eps=eps)) } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Ldot.Rd0000644000176200001440000000510314611073323015535 0ustar liggesusers\name{Ldot} \alias{Ldot} \title{Multitype L-function (i-to-any)} \description{ Calculates an estimate of the multitype L-function (from type \code{i} to any type) for a multitype point pattern. } \usage{ Ldot(X, i, ..., from, correction) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the dot-type \eqn{L} function \eqn{L_{ij}(r)}{Lij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. 
A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{correction,\dots}{ Arguments passed to \code{\link{Kdot}}. } \item{from}{An alternative way to specify \code{i}.} } \details{ This command computes \deqn{L_{i\bullet}(r) = \sqrt{\frac{K_{i\bullet}(r)}{\pi}}}{Li.(r) = sqrt(Ki.(r)/pi)} where \eqn{K_{i\bullet}(r)}{Ki.(r)} is the multitype \eqn{K}-function from points of type \code{i} to points of any type. See \code{\link{Kdot}} for information about \eqn{K_{i\bullet}(r)}{Ki.(r)}. The command \code{Ldot} first calls \code{\link{Kdot}} to compute the estimate of the \code{i}-to-any \eqn{K}-function, and then applies the square root transformation. For a marked Poisson point process, the theoretical value of the L-function is \eqn{L_{i\bullet}(r) = r}{Li.(r) = r}. The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L_{i\bullet}}{Li.} is more appropriate for use in simulation envelopes and hypothesis tests. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L_{i\bullet}}{Li.} has been estimated } \item{theo}{the theoretical value \eqn{L_{i\bullet}(r) = r}{Li.(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{i\bullet}}{Li.} obtained by the edge corrections named. } \seealso{ \code{\link{Kdot}}, \code{\link{Lcross}}, \code{\link{Lest}} } \examples{ L <- Ldot(amacrine, "off") plot(L) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/pairs.im.Rd0000644000176200001440000000710514643125461016367 0ustar liggesusers\name{pairs.im} \alias{pairs.im} \title{ Scatterplot Matrix for Pixel Images } \description{ Produces a scatterplot matrix of the pixel values in two or more pixel images. } \usage{ \method{pairs}{im}(..., plot=TRUE, drop=TRUE) } \arguments{ \item{\dots}{ Any number of arguments, each of which is either a pixel image (object of class \code{"im"}) or a named argument to be passed to \code{\link{pairs.default}}. Alternatively, a single argument which is a list of pixel images. } \item{plot}{ Logical. If \code{TRUE}, the scatterplot matrix is plotted. } \item{drop}{ Logical value specifying whether pixel values that are \code{NA} should be removed from the data frame that is returned by the function. This does not affect the plot. } } \details{ This is a method for the generic function \code{\link{pairs}} for the class of pixel images. It produces a square array of plot panels, in which each panel shows a scatterplot of the pixel values of one image against the corresponding pixel values of another image. At least two of the arguments \code{\dots} should be pixel images (objects of class \code{"im"}). Their spatial domains must overlap, but need not have the same pixel dimensions. First the pixel image domains are intersected, and converted to a common pixel resolution. Then the corresponding pixel values of each image are extracted. Then \code{\link{pairs.default}} is called to plot the scatterplot matrix. Any arguments in \code{\dots} which are not pixel images will be passed to \code{\link{pairs.default}} to control the plot. 
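For illustration, a minimal sketch (the simulated patterns and the plotting arguments \code{pch} and \code{main} are arbitrary illustrative choices, not defaults) of how non-image arguments are forwarded to \code{\link{pairs.default}}:
\preformatted{
  ## two pixel images of estimated intensity
  X <- density(rpoispp(100))
  Y <- density(rpoispp(100))
  ## 'pch' and 'main' are not images, so they are passed to pairs.default
  pairs(X, Y, pch=".", main="pixel values of X versus Y")
}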
The return value of \code{pairs.im} is a data frame, returned invisibly. The data frame has one column for each image. Each row contains the pixel values of the different images for one pixel in the raster. If \code{drop=TRUE} (the default), any row which contains \code{NA} is deleted. The plot is not affected by the value of \code{drop}. } \section{Image or Contour Plots}{ Since the scatterplots may show very dense concentrations of points, it may be useful to set \code{panel=panel.image} or \code{panel=panel.contour} to draw a colour image or contour plot of the kernel-smoothed density of the scatterplot in each panel. The argument \code{panel} is passed to \code{\link{pairs.default}}. See the help for \code{\link{panel.image}} and \code{\link{panel.contour}}. } \section{Low Level Control of Graphics}{ To control the appearance of the individual scatterplot panels, see \code{\link{pairs.default}}, \code{\link{points}} or \code{\link{par}}. To control the plotting symbol for the points in the scatterplot, use the arguments \code{pch}, \code{col}, \code{bg} as described under \code{\link{points}} (because the default panel plotter is the function \code{\link{points}}). To suppress the tick marks on the plot axes, type \code{par(xaxt="n", yaxt="n")} before calling \code{pairs}. } \value{ Invisible. A \code{data.frame} containing the corresponding pixel values for each image. The return value also belongs to the class \code{plotpairsim} which has a plot method, so that it can be re-plotted. } \seealso{ \code{\link{pairs}}, \code{\link{pairs.default}}, \code{\link{panel.contour}}, \code{\link{panel.image}}, \code{\link[spatstat.geom]{plot.im}}, \code{\link{cov.im}}, \code{\link[spatstat.geom]{im}}, \code{\link{par}} } \examples{ X <- density(rpoispp(30)) Y <- density(rpoispp(40)) Z <- density(rpoispp(30)) p <- pairs(X,Y,Z) p plot(p) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} spatstat.explore/man/clarkevans.Rd0000644000176200001440000001062614643125461017000 0ustar liggesusers\name{clarkevans} \alias{clarkevans} \title{Clark and Evans Aggregation Index} \description{ Computes the Clark and Evans aggregation index \eqn{R} for a spatial point pattern. } \usage{ clarkevans(X, correction=c("none", "Donnelly", "cdf"), clipregion=NULL) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}). } \item{correction}{ Character vector. The type of edge correction(s) to be applied. } \item{clipregion}{ Clipping region for the guard area correction. A window (object of class \code{"owin"}). See Details. } } \details{ The Clark and Evans (1954) aggregation index \eqn{R} is a crude measure of clustering or ordering of a point pattern. It is the ratio of the observed mean nearest neighbour distance in the pattern to that expected for a Poisson point process of the same intensity. A value \eqn{R>1} suggests ordering, while \eqn{R<1} suggests clustering. Without correction for edge effects, the value of \code{R} will be positively biased. Edge effects arise because, for a point of \code{X} close to the edge of the window, the true nearest neighbour may actually lie outside the window. Hence observed nearest neighbour distances tend to be larger than the true nearest neighbour distances. The argument \code{correction} specifies an edge correction or several edge corrections to be applied. It is a character vector containing one or more of the options \code{"none"}, \code{"Donnelly"}, \code{"guard"} and \code{"cdf"} (which are recognised by partial matching). 
These edge corrections are: \describe{ \item{"none":}{ No edge correction is applied. } \item{"Donnelly":}{ Edge correction of Donnelly (1978), available for rectangular windows only. The theoretical expected value of mean nearest neighbour distance under a Poisson process is adjusted for edge effects by the edge correction of Donnelly (1978). The value of \eqn{R} is the ratio of the observed mean nearest neighbour distance to this adjusted theoretical mean. } \item{"guard":}{ Guard region or buffer area method. The observed mean nearest neighbour distance for the point pattern \code{X} is re-defined by averaging only over those points of \code{X} that fall inside the sub-window \code{clipregion}. } \item{"cdf":}{ Cumulative Distribution Function method. The nearest neighbour distance distribution function \eqn{G(r)} of the stationary point process is estimated by \code{\link{Gest}} using the Kaplan-Meier type edge correction. Then the mean of the distribution is calculated from the cdf. } } Alternatively \code{correction="all"} selects all options. If the argument \code{clipregion} is given, then the selected edge corrections will be assumed to include \code{correction="guard"}. To perform a test based on the Clark-Evans index, see \code{\link{clarkevans.test}}. } \value{ A numeric value, or a numeric vector with named components \item{naive}{\eqn{R} without edge correction} \item{Donnelly}{\eqn{R} using Donnelly edge correction} \item{guard}{\eqn{R} using guard region} \item{cdf}{\eqn{R} using cdf method} (as selected by \code{correction}). The value of the \code{Donnelly} component will be \code{NA} if the window of \code{X} is not a rectangle. } \references{ Clark, P.J. and Evans, F.C. (1954) Distance to nearest neighbour as a measure of spatial relationships in populations. \emph{Ecology} \bold{35}, 445--453. Donnelly, K. (1978) Simulations to determine the variance and edge-effect of total nearest neighbour distance. In I. Hodder (ed.) \emph{Simulation studies in archaeology}, Cambridge/New York: Cambridge University Press, pp 91--95. } \author{ John Rudge \email{rudge@esc.cam.ac.uk} with modifications by \adrian } \seealso{ \code{\link{clarkevans.test}}, \code{\link{hopskel}}, \code{\link[spatstat.geom]{nndist}}, \code{\link{Gest}} } \examples{ # Example of a clustered pattern clarkevans(redwood) # Example of an ordered pattern clarkevans(cells) # Random pattern X <- rpoispp(100) clarkevans(X) # How to specify a clipping region clip1 <- owin(c(0.1,0.9),c(0.1,0.9)) clip2 <- erosion(Window(cells), 0.1) clarkevans(cells, clipregion=clip1) clarkevans(cells, clipregion=clip2) } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/boyce.Rd0000644000176200001440000001351514611073323015742 0ustar liggesusers\name{boyce} \alias{boyce} \title{ Boyce Index } \description{ Calculate the discrete or continuous Boyce index for a spatial point pattern dataset. } \usage{ boyce(X, Z, \dots, breaks = NULL, halfwidth = NULL) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}). } \item{Z}{ Habitat suitability classes or habitat suitability index. Either a tessellation (object of class \code{"tess"}) or a spatial covariate such as a pixel image (object of class \code{"im"}), a \code{function(x,y)} or one of the letters \code{"x", "y"} representing the Cartesian coordinates. } \item{\dots}{ Additional arguments passed to \code{\link{rhohat.ppp}}. 
} \item{breaks}{ The breakpoint values defining discrete bands of values of the covariate \code{Z} for which the discrete Boyce index will be calculated. Either a numeric vector of breakpoints for \code{Z}, or a single integer specifying the number of evenly-spaced breakpoints. Incompatible with \code{halfwidth}. } \item{halfwidth}{ The half-width \eqn{h} of the interval \eqn{[z-h,z+h]} which will be used to calculate the continuous Boyce index \eqn{B(z)} for each possible value \eqn{z} of the covariate \eqn{Z}. } } \details{ Given a spatial point pattern \code{X} and some kind of explanatory information \code{Z}, this function computes either the index originally defined by Boyce et al (2002) or the \sQuote{continuous Boyce index} defined by Hirzel et al (2006). Boyce et al (2002) defined an index of habitat suitability in which the study region \eqn{W} is first divided into separate subregions \eqn{C_1,\ldots,C_m}{C[1], C[2], ..., C[m]} based on appropriate scientific considerations. Then we count the number \eqn{n_j}{n[j]} of data points of \code{X} that fall in each subregion \eqn{C_j}{C[j]}, measure the area \eqn{a_j}{a[j]} of each subregion \eqn{C_j}{C[j]}, and calculate the index \deqn{ B_j = \frac{n_j/n}{a_j/a} }{ B[j] = (n[j]/n)/(a[j]/a) } where \eqn{a} is the total area and \eqn{n} is the total number of points in \code{X}. Hirzel et al (2006) defined another version of this index which is based on a continuous spatial covariate. For each possible value \eqn{z} of the covariate \eqn{Z}, consider the region \eqn{C(z)} where the value of the covariate lies between \eqn{z-h} and \eqn{z+h}, where \eqn{h} is the chosen \sQuote{halfwidth}. The \sQuote{continuous Boyce index} is \deqn{ B(z) = \frac{n(z)/n}{a(z)/a} }{ B(z) = (n(z)/n)/(a(z)/a) } where \eqn{n(z)} is the number of points of \code{X} falling in \eqn{C(z)}, and \eqn{a(z)} is the area of \eqn{C(z)}. If \code{Z} is a tessellation (object of class \code{"tess"}), the algorithm calculates the original (\sQuote{discrete}) Boyce index (Boyce et al, 2002) for each tile of the tessellation. The result is another tessellation, identical to \code{Z} except that the mark values are the values of the discrete Boyce index. If \code{Z} is a pixel image whose values are categorical (i.e. factor values), then \code{Z} is treated as a tessellation, with one tile for each level of the factor. The discrete Boyce index is then calculated. The result is a tessellation with marks that are the values of the discrete Boyce index. Otherwise, if \code{Z} is a spatial covariate such as a pixel image, a \code{function(x,y)} or one of the characters \code{"x"} or \code{"y"}, then exactly one of the arguments \code{breaks} or \code{halfwidth} must be given. \itemize{ \item if \code{halfwidth} is given, it should be a single positive number. The continuous Boyce index (Hirzel et al, 2006) is computed using the specified halfwidth \eqn{h}. The result is an object of class \code{"fv"} that can be plotted to show \eqn{B(z)} as a function of \eqn{z}. \item if \code{breaks} is given, it can be either a numeric vector of possible values of \code{Z} defining the breakpoints for the bands of values of \code{Z}, or a single integer specifying the number of evenly-spaced breakpoints that should be created. The discrete Boyce index (Boyce et al, 2002) is computed. The result is an object of class \code{"fv"} that can be plotted to show the discrete Boyce index as a function of \eqn{z}. 
} When \code{Z} is a spatial covariate (not factor-valued), the calculation is performed using \code{\link{rhohat.ppp}} (since the Boyce index is a special case of \code{rhohat}). Arguments \code{\dots} passed to \code{\link{rhohat.ppp}} control the accuracy of the spatial discretisation and other parameters of the algorithm. } \value{ A tessellation (object of class \code{"tess"}) or a function value table (object of class \code{"fv"}) as explained above. } \references{ Boyce, M.S., Vernier, P.R., Nielsen, S.E. and Schmiegelow, F.K.A. (2002) Evaluating resource selection functions. \emph{Ecological modelling} \bold{157}, 281--300. Hirzel, A.H., Le Lay, V., Helfer, V., Randin, C. and Guisan, A. (2006) Evaluating the ability of habitat suitability models to predict species presences. \emph{Ecological Modelling} \bold{199}, 142--152. } \author{ \adrian } \seealso{ \code{\link{rhohat}} } \examples{ online <- interactive() ## a simple tessellation V <- quadrats(Window(bei), 4, 3) if(online) plot(V) ## discrete Boyce index for a simple tessellation A <- boyce(bei, V) if(online) { plot(A, do.col=TRUE) marks(A) tilenames(A) } ## spatial covariate: terrain elevation Z <- bei.extra$elev ## continuous Boyce index for terrain elevation BC <- boyce(bei, Z, halfwidth=10) if(online) plot(BC) ## discrete Boyce index for terrain elevation steps of height 5 metres bk <- c(seq(min(Z), max(Z), by=5), Inf) BD <- boyce(bei, Z, breaks=bk) if(online) plot(BD) } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Kscaled.Rd0000644000176200001440000002171014643125461016211 0ustar liggesusers\name{Kscaled} \alias{Kscaled} \alias{Lscaled} \title{Locally Scaled K-function} \description{ Estimates the locally-rescaled \eqn{K}-function of a point process. } \usage{ Kscaled(X, lambda=NULL, \dots, r = NULL, breaks = NULL, rmax = 2.5, correction=c("border", "isotropic", "translate"), renormalise=FALSE, normpower=1, sigma=NULL, varcov=NULL) Lscaled(\dots) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the locally scaled \eqn{K} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link[spatstat.geom]{as.ppp}()}. } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a \code{function(x,y)} which can be evaluated to give the intensity value at any location, or a fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Arguments passed from \code{Lscaled} to \code{Kscaled} and from \code{Kscaled} to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{r}{ vector of values for the argument \eqn{r} at which the locally scaled \eqn{K} function should be evaluated. (These are rescaled distances.) Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{rmax}{ maximum value of the argument \eqn{r} that should be used. (This is the rescaled distance). } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{renormalise}{ Logical. Whether to renormalise the estimate. See Details. 
} \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing at least the following columns, \item{r}{the vector of values of the argument \eqn{r} at which the pair correlation function \eqn{g(r)} has been estimated } \item{theo}{vector of values of \eqn{\pi r^2}{pi * r^2}, the theoretical value of \eqn{K_{\rm scaled}(r)}{Kscaled(r)} for an inhomogeneous Poisson process } and containing additional columns according to the choice specified in the \code{correction} argument. The additional columns are named \code{border}, \code{trans} and \code{iso} and give the estimated values of \eqn{K_{\rm scaled}(r)}{Kscaled(r)} using the border correction, translation correction, and Ripley isotropic correction, respectively. } \details{ \code{Kscaled} computes an estimate of the \eqn{K} function for a locally scaled point process. \code{Lscaled} computes the corresponding \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. Locally scaled point processes are a class of models for inhomogeneous point patterns, introduced by Hahn et al (2003). They include inhomogeneous Poisson processes, and many other models. The template \eqn{K} function of a locally-scaled process is a counterpart of the ``ordinary'' Ripley \eqn{K} function, in which the distances between points of the process are measured on a spatially-varying scale (such that the locally rescaled process has unit intensity). The template \eqn{K} function is an indicator of interaction between the points. For an inhomogeneous Poisson process, the theoretical template \eqn{K} function is approximately equal to \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}. Values \eqn{K_{\rm scaled}(r) > \pi r^2}{Kscaled(r) > pi * r^2} are suggestive of clustering. \code{Kscaled} computes an estimate of the template \eqn{K} function and \code{Lscaled} computes the corresponding \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. The locally scaled interpoint distances are computed using an approximation proposed by Hahn (2007). The Euclidean distance between two points is multiplied by the average of the square roots of the intensity values at the two points. The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda}. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. 
If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. If \code{renormalise=TRUE}, the estimated intensity \code{lambda} is multiplied by \eqn{c^(normpower/2)} before performing other calculations, where \eqn{c = area(W)/sum[i] (1/lambda(x[i]))}. This renormalisation has about the same effect as in \code{\link{Kinhom}}, reducing the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. Edge corrections are used to correct bias in the estimation of \eqn{K_{\rm scaled}}{Kscaled}. First the interpoint distances are rescaled, and then edge corrections are applied as in \code{\link{Kest}}. See \code{\link{Kest}} for details of the edge corrections and the options for the argument \code{correction}. The pair correlation function can also be applied to the result of \code{Kscaled}; see \code{\link{pcf}} and \code{\link{pcf.fv}}. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. Hahn, U. (2007) \emph{Global and Local Scaling in the Statistics of Spatial Point Processes}. Habilitationsschrift, Universitaet Augsburg. Hahn, U., Jensen, E.B.V., van Lieshout, M.N.M. and Nielsen, L.S. (2003) Inhomogeneous spatial point processes by location-dependent scaling. \emph{Advances in Applied Probability} \bold{35}, 319--336. \Prokesova, M., Hahn, U. and Vedel Jensen, E.B. (2006) Statistics for locally scaled point patterns. In A. Baddeley, P. Gregori, J. Mateu, R. Stoica and D. Stoyan (eds.) \emph{Case Studies in Spatial Point Pattern Modelling}. Lecture Notes in Statistics 185. New York: Springer Verlag. Pages 99--123. 
} \seealso{ \code{\link{Kest}}, \code{\link{pcf}} } \examples{ X <- unmark(bronzefilter) K <- Kscaled(X) if(require("spatstat.model")) { fit <- ppm(X, ~x) lam <- predict(fit) K <- Kscaled(X, lam) } } \author{Ute Hahn, \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Kmulti.inhom.Rd0000644000176200001440000002627014643125461017227 0ustar liggesusers\name{Kmulti.inhom} \alias{Kmulti.inhom} \title{ Inhomogeneous Marked K-Function } \description{ For a marked point pattern, estimate the inhomogeneous version of the multitype \eqn{K} function which counts the expected number of points of subset \eqn{J} within a given distance from a typical point in subset \code{I}, adjusted for spatially varying intensity. } \usage{ Kmulti.inhom(X, I, J, lambdaI=NULL, lambdaJ=NULL, \dots, r=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), lambdaIJ=NULL, sigma=NULL, varcov=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset index specifying the points of \code{X} from which distances are measured. See Details. } \item{J}{Subset index specifying the points in \code{X} to which distances are measured. See Details. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process \code{X[I]}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X[I]}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. Values of the estimated intensity of the sub-process \code{X[J]}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X[J]}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{Ignored.} \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{lambdaIJ}{ Optional. A matrix containing estimates of the product of the intensities \code{lambdaI} and \code{lambdaJ} for each pair of points, the first point belonging to subset \code{I} and the second point to subset \code{J}. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{lambdaX}{ Optional. Values of the intensity for all points of \code{X}. 
Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If present, this argument overrides both \code{lambdaI} and \code{lambdaJ}. } \item{update}{ Logical value indicating what to do when \code{lambdaI}, \code{lambdaJ} or \code{lambdaX} is a fitted point process model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link[spatstat.model]{update.ppm}} or \code{\link[spatstat.model]{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link[spatstat.model]{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{IJ}(r)}{KIJ(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{IJ}(r)}{KIJ(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{IJ}(r)}{KIJ(r)} obtained by the edge corrections named. } \details{ The function \code{Kmulti.inhom} is the counterpart, for spatially-inhomogeneous marked point patterns, of the multitype \eqn{K} function \code{\link{Kmulti}}. Suppose \eqn{X} is a marked point process, with marks of any kind. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are two sub-processes, possibly overlapping. Typically \eqn{X_I}{X[I]} would consist of those points of \eqn{X} whose marks lie in a specified range of mark values, and similarly for \eqn{X_J}{X[J]}. Suppose that \eqn{\lambda_I(u)}{lambdaI(u)}, \eqn{\lambda_J(u)}{lambdaJ(u)} are the spatially-varying intensity functions of \eqn{X_I}{X[I]} and \eqn{X_J}{X[J]} respectively. Consider all the pairs of points \eqn{(u,v)} in the point process \eqn{X} such that the first point \eqn{u} belongs to \eqn{X_I}{X[I]}, the second point \eqn{v} belongs to \eqn{X_J}{X[J]}, and the distance between \eqn{u} and \eqn{v} is less than a specified distance \eqn{r}. Give this pair \eqn{(u,v)} the numerical weight \eqn{1/(\lambda_I(u)\lambda_J(u))}{1/(lambdaI(u) lambdaJ(u))}. Calculate the sum of these weights over all pairs of points as described. This sum (after appropriate edge-correction and normalisation) is the estimated inhomogeneous multitype \eqn{K} function. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. 
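For instance, a minimal sketch (using the \code{amacrine} data; the smoothing bandwidth \code{sigma=0.1} is an arbitrary illustrative value) in which \code{I} and \code{J} are logical vectors:
\preformatted{
  X <- amacrine
  I <- (marks(X) == "on")   # distances measured from the 'on' cells ...
  J <- (marks(X) == "off")  # ... to the 'off' cells
  ## lambdaI and lambdaJ omitted: estimated by leave-one-out kernel smoothing
  K <- Kmulti.inhom(X, I, J, sigma=0.1)
  plot(K)
}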
Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. The argument \code{lambdaI} supplies the values of the intensity of the sub-process identified by index \code{I}. It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the intensity of \code{X[I]} at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the intensity of \code{X[I]} evaluated only at the data points of \code{X[I]}. The length of this vector must equal the number of points in \code{X[I]}. } \item{a function}{ of the form \code{function(x,y)} which can be evaluated to give values of the intensity at any locations. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate of \code{lambdaI} for a given point is computed by removing the point from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point in question. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Similarly \code{lambdaJ} supplies the values of the intensity of the sub-process identified by index \code{J}. Alternatively if the argument \code{lambdaX} is given, then it specifies the intensity values for all points of \code{X}, and the arguments \code{lambdaI}, \code{lambdaJ} will be ignored. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link[graphics]{hist}}) for the computation of histograms of distances. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Biases due to edge effects are treated in the same manner as in \code{\link{Kinhom}}. The edge corrections implemented here are \describe{ \item{border}{the border method or ``reduced sample'' estimator (see Ripley, 1988). This is the least efficient (statistically) and the fastest to compute. It can be computed for a window of arbitrary shape. } \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is currently implemented only for rectangular windows. } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries. } } The pair correlation function \code{\link{pcf}} can also be applied to the result of \code{Kmulti.inhom}. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. 
(2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. } \seealso{ \code{\link{Kmulti}}, \code{\link{Kdot.inhom}}, \code{\link{Kcross.inhom}}, \code{\link{pcf}} } \examples{ # Finnish Pines data: marked by diameter and height plot(finpines, which.marks="height") II <- (marks(finpines)$height <= 2) JJ <- (marks(finpines)$height > 3) K <- Kmulti.inhom(finpines, II, JJ) plot(K) # functions determining subsets f1 <- function(X) { marks(X)$height <= 2 } f2 <- function(X) { marks(X)$height > 3 } K <- Kmulti.inhom(finpines, f1, f2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/macros/0000755000176200001440000000000014611073322015630 5ustar liggesusersspatstat.explore/man/macros/defns.Rd0000755000176200001440000001076314611073322017230 0ustar liggesusers%% macro definitions for spatstat man pages %% Authors \newcommand{\adrian}{Adrian Baddeley \email{Adrian.Baddeley@curtin.edu.au}} \newcommand{\rolf}{Rolf Turner \email{rolfturner@posteo.net}} \newcommand{\ege}{Ege Rubak \email{rubak@math.aau.dk}} \newcommand{\spatstatAuthors}{\adrian, \rolf and \ege} \newcommand{\spatstatAuthorsComma}{\adrian, \rolf, \ege} %% Contributors with emails \newcommand{\pavel}{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su}} \newcommand{\dominic}{Dominic Schuhmacher \email{dominic.schuhmacher@mathematik.uni-goettingen.de}, URL \code{http://dominic.schuhmacher.name/}} \newcommand{\wei}{Ang Qi Wei \email{aqw07398@hotmail.com}} \newcommand{\colette}{Marie-Colette van Lieshout \email{Marie-Colette.van.Lieshout@cwi.nl}} \newcommand{\rasmus}{Rasmus Plenge Waagepetersen \email{rw@math.auc.dk}} \newcommand{\abdollah}{Abdollah Jalilian \email{jalilian@razi.ac.ir}} \newcommand{\ottmar}{Ottmar Cronie \email{ottmar@chalmers.se}} \newcommand{\stephenEglen}{Stephen Eglen \email{S.J.Eglen@damtp.cam.ac.uk}} \newcommand{\mehdi}{Mehdi Moradi \email{m2.moradi@yahoo.com}} \newcommand{\yamei}{Ya-Mei Chang \email{yamei628@gmail.com}} \newcommand{\martinH}{Martin Hazelton \email{Martin.Hazelton@otago.ac.nz}} \newcommand{\tilman}{Tilman Davies \email{Tilman.Davies@otago.ac.nz}} % Names with accents \newcommand{\Bogsted}{\ifelse{latex}{\out{B\o gsted}}{Bogsted}} \newcommand{\Cramer}{\ifelse{latex}{\out{Cram\'er}}{Cramer}} \newcommand{\Francois}{\ifelse{latex}{\out{Fran\c{c}ois}}{Francois}} \newcommand{\Frederic}{\ifelse{latex}{\out{Fr{\'e}d{\'e}ric}}{Frederic}} \newcommand{\Hogmander}{\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}} \newcommand{\Jyvaskyla}{\ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}} \newcommand{\Lucia}{\ifelse{latex}{\out{Luc\'{\i{}}a}}{Lucia}} \newcommand{\Matern}{\ifelse{latex}{\out{Mat\'ern}}{Matern}} \newcommand{\Moller}{\ifelse{latex}{\out{M\o ller}}{Moller}} \newcommand{\Oehlschlaegel}{\ifelse{latex}{\out{Oehlschl\"{a}gel}}{Oehlschlaegel}} \newcommand{\Prokesova}{\ifelse{latex}{\out{Proke\u{s}ov{\'{a}}}}{Prokesova}} \newcommand{\Sarkka}{\ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}} \newcommand{\Sanchez}{\ifelse{latex}{\out{S\'{a}nchez}}{Sanchez}} \newcommand{\Martin}{\ifelse{latex}{\out{Mart\'{\i}n}}{Martin}} \newcommand{\Dominguez}{\ifelse{latex}{\out{Dom\'{\i}nguez}}{Dominguez}} \newcommand{\Rodriguez}{\ifelse{latex}{\out{Rodr\'{\i}guez}}{Rodriguez}} \newcommand{\Gonzalez}{\ifelse{latex}{\out{Gonz\'{a}lez}}{Gonzalez}} %% List of all Gibbs interactions \newcommand{\GibbsInteractionsList}{\code{\link[MPKG]{AreaInter}}, \code{\link[MPKG]{BadGey}}, \code{\link[MPKG]{Concom}}, 
\code{\link[MPKG]{DiggleGatesStibbard}}, \code{\link[MPKG]{DiggleGratton}}, \code{\link[MPKG]{Fiksel}}, \code{\link[MPKG]{Geyer}}, \code{\link[MPKG]{Hardcore}}, \code{\link[MPKG]{HierHard}}, \code{\link[MPKG]{HierStrauss}}, \code{\link[MPKG]{HierStraussHard}}, \code{\link[MPKG]{Hybrid}}, \code{\link[MPKG]{LennardJones}}, \code{\link[MPKG]{MultiHard}}, \code{\link[MPKG]{MultiStrauss}}, \code{\link[MPKG]{MultiStraussHard}}, \code{\link[MPKG]{OrdThresh}}, \code{\link[MPKG]{Ord}}, \code{\link[MPKG]{Pairwise}}, \code{\link[MPKG]{PairPiece}}, \code{\link[MPKG]{Penttinen}}, \code{\link[MPKG]{Poisson}}, \code{\link[MPKG]{Saturated}}, \code{\link[MPKG]{SatPiece}}, \code{\link[MPKG]{Softcore}}, \code{\link[MPKG]{Strauss}}, \code{\link[MPKG]{StraussHard}} and \code{\link[MPKG]{Triplets}}} %% List of interactions recognised by RMH code \newcommand{\rmhInteractionsList}{\code{\link[MPKG]{AreaInter}}, \code{\link[MPKG]{BadGey}}, \code{\link[MPKG]{DiggleGatesStibbard}}, \code{\link[MPKG]{DiggleGratton}}, \code{\link[MPKG]{Fiksel}}, \code{\link[MPKG]{Geyer}}, \code{\link[MPKG]{Hardcore}}, \code{\link[MPKG]{Hybrid}}, \code{\link[MPKG]{LennardJones}}, \code{\link[MPKG]{MultiStrauss}}, \code{\link[MPKG]{MultiStraussHard}}, \code{\link[MPKG]{PairPiece}}, \code{\link[MPKG]{Penttinen}}, \code{\link[MPKG]{Poisson}}, \code{\link[MPKG]{Softcore}}, \code{\link[MPKG]{Strauss}}, \code{\link[MPKG]{StraussHard}} and \code{\link[MPKG]{Triplets}}} %% Frequent references \newcommand{\baddrubaturnbook}{Baddeley, A., Rubak, E. and Turner, R. (2015) \emph{Spatial Point Patterns: Methodology and Applications with R}. Chapman and Hall/CRC Press. } %% Citations of recent articles that will change rapidly \newcommand{\baddchangclustersim}{Baddeley, A. and Chang, Y.-M. (2023) Robust algorithms for simulating cluster point processes. \emph{Journal of Statistical Computation and Simulation}. In Press. DOI \code{10.1080/00949655.2023.2166045}.} spatstat.explore/man/localpcf.Rd0000644000176200001440000001571214611073324016426 0ustar liggesusers\name{localpcf} \alias{localpcf} \alias{localpcfinhom} \title{Local pair correlation function} \description{ Computes individual contributions to the pair correlation function from each data point. } \usage{ localpcf(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, rvalue=NULL) localpcfinhom(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, lambda=NULL, sigma=NULL, varcov=NULL, update=TRUE, leaveoneout=TRUE, rvalue=NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{delta}{ Smoothing bandwidth for pair correlation. The halfwidth of the Epanechnikov kernel. } \item{rmax}{ Optional. Maximum value of distance \eqn{r} for which pair correlation values \eqn{g(r)} should be computed. } \item{nr}{ Optional. Number of values of distance \eqn{r} for which pair correlation \eqn{g(r)} should be computed. } \item{stoyan}{ Optional. The value of the constant \eqn{c} in Stoyan's rule of thumb for selecting the smoothing bandwidth \code{delta}. } \item{lambda}{ Optional. Values of the estimated intensity function, for the inhomogeneous pair correlation. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. 
} \item{sigma,varcov,\dots}{ These arguments are ignored by \code{localpcf} but are passed by \code{localpcfinhom} (when \code{lambda=NULL}) to the function \code{\link{density.ppp}} to control the kernel smoothing estimation of \code{lambda}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link[spatstat.model]{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link[spatstat.model]{update.ppm}} or \code{\link[spatstat.model]{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the local pair correlation should be computed. } } \details{ \code{localpcf} computes the contribution, from each individual data point in a point pattern \code{X}, to the empirical pair correlation function of \code{X}. These contributions are sometimes known as LISA (local indicator of spatial association) functions based on pair correlation. \code{localpcfinhom} computes the corresponding contribution to the \emph{inhomogeneous} empirical pair correlation function of \code{X}. Given a spatial point pattern \code{X}, the local pcf \eqn{g_i(r)}{g[i](r)} associated with the \eqn{i}th point in \code{X} is computed by \deqn{ g_i(r) = \frac a {2 \pi n} \sum_j k(d_{i,j} - r) }{ g[i](r) = (a/(2 * pi * n) * sum[j] k(d[i,j] - r) } where the sum is over all points \eqn{j \neq i}{j != i}, \eqn{a} is the area of the observation window, \eqn{n} is the number of points in \code{X}, and \eqn{d_{ij}}{d[i,j]} is the distance between points \code{i} and \code{j}. Here \code{k} is the Epanechnikov kernel, \deqn{ k(t) = \frac 3 { 4\delta} \max(0, 1 - \frac{t^2}{\delta^2}). }{ k(t) = (3/(4*delta)) * max(0, 1 - t^2/delta^2). } Edge correction is performed using the border method (for the sake of computational efficiency): the estimate \eqn{g_i(r)}{g[i](r)} is set to \code{NA} if \eqn{r > b_i}{r > b[i]}, where \eqn{b_i}{b[i]} is the distance from point \eqn{i} to the boundary of the observation window. The smoothing bandwidth \eqn{\delta}{delta} may be specified. If not, it is chosen by Stoyan's rule of thumb \eqn{\delta = c/\hat\lambda}{delta = c/lambda} where \eqn{\hat\lambda = n/a}{lambda = n/a} is the estimated intensity and \eqn{c} is a constant, usually taken to be 0.15. The value of \eqn{c} is controlled by the argument \code{stoyan}. For \code{localpcfinhom}, the optional argument \code{lambda} specifies the values of the estimated intensity function. If \code{lambda} is given, it should be either a numeric vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If \code{lambda} is not given, then it will be estimated using a leave-one-out kernel density smoother as described in \code{\link{pcfinhom}}. 
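For illustration, a minimal sketch (the bandwidth \code{sigma=50} is an arbitrary illustrative value for the \code{ponderosa} data) of supplying \code{lambda} as a pixel image:
\preformatted{
  X <- ponderosa
  lam <- density(X, sigma=50)          # pixel image of estimated intensity
  gi <- localpcfinhom(X, lambda=lam)   # local inhomogeneous pair correlations
}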
Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the local pair correlation functions have been estimated } \item{theo}{the theoretical value of the pair correlation function, \eqn{g(r) = 1}, for a stationary Poisson process } together with columns containing the values of the local pair correlation function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point. The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{localK}}, \code{\link{localKinhom}}, \code{\link{pcf}}, \code{\link{pcfinhom}} } \examples{ X <- ponderosa g <- localpcf(X, stoyan=0.5) colo <- c(rep("grey", npoints(X)), "blue") a <- plot(g, main=c("local pair correlation functions", "Ponderosa pines"), legend=FALSE, col=colo, lty=1) # plot only the local pair correlation function for point number 7 plot(g, est007 ~ r) # Extract the local pair correlation at distance 15 metres, for each point g15 <- localpcf(X, rvalue=15, stoyan=0.5) g15[1:10] # Check that the value for point 7 agrees with the curve for point 7: points(15, g15[7], col="red") # Inhomogeneous gi <- localpcfinhom(X, stoyan=0.5) a <- plot(gi, main=c("inhomogeneous local pair correlation functions", "Ponderosa pines"), legend=FALSE, col=colo, lty=1) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/dclf.sigtrace.Rd0000644000176200001440000001371214611073323017350 0ustar liggesusers\name{dclf.sigtrace} \alias{dclf.sigtrace} \alias{mad.sigtrace} \alias{mctest.sigtrace} \title{ Significance Trace of Cressie-Loosmore-Ford or Maximum Absolute Deviation Test } \description{ Generates a Significance Trace of the Diggle (1986)/ Cressie (1991)/ Loosmore and Ford (2006) test or the Maximum Absolute Deviation test for a spatial point pattern. } \usage{ dclf.sigtrace(X, \dots) mad.sigtrace(X, \dots) mctest.sigtrace(X, fun=Lest, \dots, exponent=1, interpolate=FALSE, alpha=0.05, confint=TRUE, rmin=0) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{\dots}{ Arguments passed to \code{\link{envelope}} or \code{\link{mctest.progress}}. Useful arguments include \code{fun} to determine the summary function, \code{nsim} to specify the number of Monte Carlo simulations, \code{alternative} to specify a one-sided test, and \code{verbose=FALSE} to turn off the messages. } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{exponent}{ Positive number. The exponent of the \eqn{L^p} distance. See Details. } \item{interpolate}{ Logical value specifying whether to calculate the \eqn{p}-value by interpolation. If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, yielding a \eqn{p}-value of the form \eqn{(k+1)/(n+1)} where \eqn{n} is the number of simulations and \eqn{k} is the number of simulated values which are more extreme than the observed value. 
If \code{interpolate=TRUE}, the \eqn{p}-value is calculated by applying kernel density estimation to the simulated values, and computing the tail probability for this estimated distribution. } \item{alpha}{ Significance level to be plotted (this has no effect on the calculation but is simply plotted as a reference value). } \item{confint}{ Logical value indicating whether to compute a confidence interval for the \sQuote{true} \eqn{p}-value. } \item{rmin}{ Optional. Left endpoint for the interval of \eqn{r} values on which the test statistic is calculated. } } \details{ The Diggle (1986)/ Cressie (1991)/Loosmore and Ford (2006) test and the Maximum Absolute Deviation test for a spatial point pattern are described in \code{\link{dclf.test}}. These tests depend on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{significance trace} (Bowman and Azzalini, 1997; Baddeley et al, 2014, 2015; Baddeley, Rubak and Turner, 2015) of the test is a plot of the \eqn{p}-value obtained from the test against the length of the interval \code{rinterval}. The command \code{dclf.sigtrace} performs \code{\link{dclf.test}} on \code{X} using all possible intervals of the form \eqn{[0,R]}, and returns the resulting \eqn{p}-values as a function of \eqn{R}. Similarly \code{mad.sigtrace} performs \code{\link{mad.test}} using all possible intervals and returns the \eqn{p}-values. More generally, \code{mctest.sigtrace} performs a test based on the \eqn{L^p} discrepancy between the curves. The deviation between two curves is measured by the \eqn{p}th root of the integral of the \eqn{p}th power of the absolute value of the difference between the two curves. The exponent \eqn{p} is given by the argument \code{exponent}. The case \code{exponent=2} is the Cressie-Loosmore-Ford test, while \code{exponent=Inf} is the MAD test. If the argument \code{rmin} is given, it specifies the left endpoint of the interval defining the test statistic: the tests are performed using intervals \eqn{[r_{\mbox{\scriptsize min}},R]}{[rmin,R]} where \eqn{R \ge r_{\mbox{\scriptsize min}}}{R \ge rmin}. The result of each command is an object of class \code{"fv"} that can be plotted to obtain the significance trace. The plot shows the Monte Carlo \eqn{p}-value (solid black line), the critical value \code{0.05} (dashed red line), and a pointwise 95\% confidence band (grey shading) for the \sQuote{true} (Neyman-Pearson) \eqn{p}-value. The confidence band is based on the Agresti-Coull (1998) confidence interval for a binomial proportion (when \code{interpolate=FALSE}) or the delta method and normal approximation (when \code{interpolate=TRUE}). If \code{X} is an envelope object and \code{fun=NULL} then the code will re-use the simulated functions stored in \code{X}. } \value{ An object of class \code{"fv"} that can be plotted to obtain the significance trace. } \references{ Agresti, A. and Coull, B.A. (1998) Approximate is better than \dQuote{Exact} for interval estimation of binomial proportions. \emph{American Statistician} \bold{52}, 119--126. Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84}(3) 477--489. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2015) Pushing the envelope: extensions of graphical Monte Carlo tests. Unpublished manuscript. \baddrubaturnbook Bowman, A.W. and Azzalini, A. 
(1997) \emph{Applied smoothing techniques for data analysis: the kernel approach with S-Plus illustrations}. Oxford University Press, Oxford. } \author{ Adrian Baddeley, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \spatstatAuthors. } \seealso{ \code{\link{dclf.test}} for the tests; \code{\link{dclf.progress}} for progress plots. See \code{\link{plot.fv}} for information on plotting objects of class \code{"fv"}. See also \code{\link{dg.sigtrace}}. } \examples{ plot(dclf.sigtrace(cells, Lest, nsim=19)) } \keyword{spatial} \keyword{htest} spatstat.explore/man/densityHeat.Rd0000644000176200001440000000244214611073322017116 0ustar liggesusers\name{densityHeat} \alias{densityHeat} \title{ Diffusion Estimate of Point Pattern Intensity } \description{ Computes a diffusion estimate of intensity for a point pattern. } \usage{ densityHeat(x, sigma, \dots) } \arguments{ \item{x}{ Point pattern (object of class \code{"ppp"} or another class). } \item{sigma}{ Smoothing bandwidth. Usually a single number giving the equivalent standard deviation of the smoother. } \item{\dots}{ Additional arguments depending on the method. } } \details{ The generic function \code{densityHeat} computes an estimate of point process intensity using a diffusion kernel method. Further details depend on the class of point pattern \code{x}. See the help file for the appropriate method. } \value{ Depends on the class of \code{x}. } \seealso{ For two-dimensional point patterns (objects of class \code{"ppp"}), the diffusion kernel estimator is \code{\link{densityHeat.ppp}}. The usual kernel estimator is \code{\link{density.ppp}}, and the tessellation-based estimator is \code{\link{adaptive.density}}. %% For point patterns on a linear network (objects of class %% \code{"lpp"}), see \code{\link[spatstat.linnet]{densityHeat.lpp}}. } \author{ Adrian Baddeley and Tilman Davies. } \keyword{spatial} \keyword{smooth} spatstat.explore/man/Fest.Rd0000644000176200001440000002722314643125461015551 0ustar liggesusers\name{Fest} \alias{Fest} \alias{Fhazard} \title{Estimate the Empty Space Function or its Hazard Rate} \description{ Estimates the empty space function \eqn{F(r)} or its hazard rate \eqn{h(r)} from a point pattern in a window of arbitrary shape. } \usage{ Fest(X, \dots, eps, r=NULL, breaks=NULL, correction=c("rs", "km", "cs"), domain=NULL) Fhazard(X, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{F(r)} will be computed. An object of class \code{ppp}, or data in any format acceptable to \code{\link[spatstat.geom]{as.ppp}()}. } \item{\dots}{ Extra arguments, passed from \code{Fhazard} to \code{Fest}. Extra arguments to \code{Fest} are ignored. } \item{eps}{Optional. A positive number. The resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which \eqn{F(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{F(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"cs"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{domain}{ Optional. Calculations will be restricted to this subset of the window. 
See Details. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. The result of \code{Fest} is essentially a data frame containing up to seven columns: \item{r}{the values of the argument \eqn{r} at which the function \eqn{F(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{F(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{F(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{F(r)} by the spatial Kaplan-Meier method } \item{cs}{the Chiu-Stoyan estimator of \eqn{F(r)} } \item{raw}{the uncorrected estimate of \eqn{F(r)}, i.e. the empirical distribution of the distance from a random point in the window to the nearest point of the data pattern \code{X} } \item{theo}{the theoretical value of \eqn{F(r)} for a stationary Poisson process of the same estimated intensity. } The result of \code{Fhazard} contains only three columns \item{r}{the values of the argument \eqn{r} at which the hazard rate \eqn{h(r)} has been estimated } \item{hazard}{the spatial Kaplan-Meier estimate of the hazard rate \eqn{h(r)}} \item{theo}{ the theoretical value of \eqn{h(r)} for a stationary Poisson process of the same estimated intensity. } } \details{ \code{Fest} computes an estimate of the empty space function \eqn{F(r)}, and \code{Fhazard} computes an estimate of its hazard rate \eqn{h(r)}. The empty space function (also called the ``\emph{spherical contact distribution}'' or the ``\emph{point-to-nearest-event}'' distribution) of a stationary point process \eqn{X} is the cumulative distribution function \eqn{F} of the distance from a fixed point in space to the nearest point of \eqn{X}. An estimate of \eqn{F} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1988). In exploratory analyses, the estimate of \eqn{F} is a useful statistic summarising the sizes of gaps in the pattern. For inferential purposes, the estimate of \eqn{F} is usually compared to the true value of \eqn{F} for a completely random (Poisson) point process, which is \deqn{F(r) = 1 - e^{ - \lambda \pi r^2}}{% F(r) = 1 - exp( - lambda * pi * r^2) % } where \eqn{\lambda}{lambda} is the intensity (expected number of points per unit area). Deviations between the empirical and theoretical \eqn{F} curves may suggest spatial clustering or spatial regularity. This algorithm estimates the empty space function \eqn{F} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X}) may have arbitrary shape. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link[spatstat.geom]{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link[spatstat.geom]{as.ppp}}. The algorithm uses two discrete approximations which are controlled by the parameter \code{eps} and by the spacing of values of \code{r} respectively. (See below for details.) First-time users are strongly advised not to specify these arguments. The estimation of \eqn{F} is hampered by edge effects arising from the unobservability of points of the random pattern outside the window. An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). 
The edge corrections implemented here are the border method or "\emph{reduced sample}" estimator, the spatial Kaplan-Meier estimator (Baddeley and Gill, 1997) and the Chiu-Stoyan estimator (Chiu and Stoyan, 1998). Our implementation makes essential use of the distance transform algorithm of image processing (Borgefors, 1986). A fine grid of pixels is created in the observation window. The Euclidean distance between two pixels is approximated by the length of the shortest path joining them in the grid, where a path is a sequence of steps between adjacent pixels, and horizontal, vertical and diagonal steps have length \eqn{1}, \eqn{1} and \eqn{\sqrt 2}{sqrt(2)} respectively in pixel units. If the pixel grid is sufficiently fine then this is an accurate approximation. The parameter \code{eps} is the pixel width of the rectangular raster used to compute the distance transform (see below). It must not be too large: the absolute error in distance values due to discretisation is bounded by \code{eps}. If \code{eps} is not specified, the function checks whether the window \code{Window(X)} contains pixel raster information. If so, then \code{eps} is set equal to the pixel width of the raster; otherwise, \code{eps} defaults to 1/100 of the width of the observation window. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{F(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The estimators are computed from histogram counts. This introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the spacing of successive \code{r} values must be very fine (ideally not greater than \code{eps/4}). The algorithm also returns an estimate of the hazard rate function, \eqn{h(r)} of \eqn{F(r)}. The hazard rate is defined by \deqn{h(r) = - \frac{d}{dr} \log(1 - F(r))}{% h(r) = - (d/dr) log(1 - F(r)) % } The hazard rate of \eqn{F} has been proposed as a useful exploratory statistic (Baddeley and Gill, 1994). The estimate of \eqn{h(r)} given here is a discrete approximation to the hazard rate of the Kaplan-Meier estimator of \eqn{F}. Note that \eqn{F} is absolutely continuous (for any stationary point process \eqn{X}), so the hazard function always exists (Baddeley and Gill, 1997). If the argument \code{domain} is given, the estimate of \eqn{F(r)} will be based only on the empty space distances measured from locations inside \code{domain} (although their nearest data points may lie outside \code{domain}). This is useful in bootstrap techniques. The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link[spatstat.geom]{as.owin}}. It must be a subset of the window of the point pattern \code{X}. The naive empirical distribution of distances from each location in the window to the nearest point of the data pattern, is a biased estimate of \eqn{F}. However this is also returned by the algorithm (if \code{correction="none"}), as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{F} as if it were an unbiased estimator of \eqn{F}. } \note{ Sizeable amounts of memory may be needed during the calculation. 
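A minimal sketch of the \code{domain} argument and of \code{Fhazard}, assuming the standard \code{cells} dataset; the subwindow and the smoothing degrees of freedom are arbitrary illustrative choices (\code{square} is provided by spatstat.geom):

  Fsub <- Fest(cells, domain=square(0.5))  # empty space distances measured from [0, 0.5]^2 only
  Fh <- Fhazard(cells)                     # Kaplan-Meier estimate of the hazard rate
  plot(Smooth(Fh, df=8))                   # optional smoothing of the discretised hazard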
} \references{
Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37-78.
Baddeley, A.J. and Gill, R.D. The empty space hazard of a spatial pattern. Research Report 1994/3, Department of Mathematics, University of Western Australia, May 1994.
Baddeley, A.J. and Gill, R.D. Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25} (1997) 263-292.
Borgefors, G. Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34} (1986) 344-371.
Chiu, S.N. and Stoyan, D. (1998) Estimators of distance distributions for spatial patterns. \emph{Statistica Neerlandica} \bold{52}, 239--246.
Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991.
Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983.
Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988.
Stoyan, D., Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995.
} \section{Warnings}{ The reduced sample (border method) estimator of \eqn{F} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{F} is always nondecreasing but its maximum value may be less than \eqn{1}. The estimate of hazard rate \eqn{h(r)} returned by the algorithm is an approximately unbiased estimate for the integral of \eqn{h()} over the corresponding histogram cell. It may exhibit oscillations due to discretisation effects. We recommend modest smoothing, such as kernel smoothing with kernel width equal to the width of a histogram cell, using \code{\link{Smooth.fv}}. } \seealso{ \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link[spatstat.univar]{km.rs}}, \code{\link[spatstat.univar]{reduced.sample}}, \code{\link[spatstat.univar]{kaplan.meier}} } \examples{
Fc <- Fest(cells, 0.01)
# Tip: don't use F for the left hand side!
# That's an abbreviation for FALSE
plot(Fc)
# P-P style plot
plot(Fc, cbind(km, theo) ~ theo)
# The empirical F is above the Poisson F
# indicating an inhibited pattern
if(interactive()) {
  plot(Fc, . ~ theo)
  plot(Fc, asin(sqrt(.)) ~ asin(sqrt(theo)))
}
\testonly{ Fh <- Fhazard(cells) }
} \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/clusterset.Rd0000644000176200001440000001132714643125461017043 0ustar liggesusers\name{clusterset} \alias{clusterset} \title{ Allard-Fraley Estimator of Cluster Feature } \description{ Detect high-density features in a spatial point pattern using the (unrestricted) Allard-Fraley estimator. } \usage{ clusterset(X, what=c("marks", "domain"), \dots, verbose=TRUE, fast=FALSE, exact=!fast) } \arguments{ \item{X}{ A two-dimensional spatial point pattern (object of class \code{"ppp"}). } \item{what}{ Character string or character vector specifying the type of result. See Details. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{fast}{ Logical. If \code{FALSE} (the default), the Dirichlet tile areas will be computed exactly using polygonal geometry, so that the optimal choice of tiles will be computed exactly.
If \code{TRUE}, the Dirichlet tile areas will be approximated using pixel counting, so the optimal choice will be approximate. } \item{exact}{ Logical. If \code{TRUE}, the Allard-Fraley estimator of the domain will be computed exactly using polygonal geometry. If \code{FALSE}, the Allard-Fraley estimator of the domain will be approximated by a binary pixel mask. The default is \code{exact=!fast}, so the set estimator is computed exactly unless \code{fast=TRUE}. } \item{\dots}{ Optional arguments passed to \code{\link[spatstat.geom]{as.mask}} to control the pixel resolution if \code{exact=FALSE}. } } \details{ Allard and Fraley (1997) developed a technique for recognising features of high density in a spatial point pattern in the presence of random clutter. This algorithm computes the \emph{unrestricted} Allard-Fraley estimator. The Dirichlet (Voronoi) tessellation of the point pattern \code{X} is computed. The smallest \code{m} Dirichlet cells are selected, where the number \code{m} is determined by a maximum likelihood criterion. \itemize{ \item If \code{fast=FALSE} (the default), the areas of the tiles of the Dirichlet tessellation will be computed exactly using polygonal geometry. This ensures that the optimal selection of tiles is computed exactly. \item If \code{fast=TRUE}, the Dirichlet tile areas will be approximated by counting pixels. This is faster, and is usually correct (depending on the pixel resolution, which is controlled by the arguments \code{\dots}). } The type of result depends on the character vector \code{what}. \itemize{ \item If \code{what="marks"} the result is the point pattern \code{X} with a vector of marks labelling each point with a value \code{yes} or \code{no} depending on whether the corresponding Dirichlet cell is selected by the Allard-Fraley estimator. In other words each point of \code{X} is labelled as either a cluster point or a non-cluster point. \item If \code{what="domain"}, the result is the Allard-Fraley estimator of the cluster feature set, which is the union of all the selected Dirichlet cells, represented as a window (object of class \code{"owin"}). \item If \code{what=c("marks", "domain")} the result is a list containing both of the results described above. } Computation of the Allard-Fraley set estimator depends on the argument \code{exact}. \itemize{ \item If \code{exact=TRUE} (the default), the Allard-Fraley set estimator will be computed exactly using polygonal geometry. The result is a polygonal window. \item If \code{exact=FALSE}, the Allard-Fraley set estimator will be approximated by a binary pixel mask. This is faster than the exact computation. The result is a binary mask. } } \value{ If \code{what="marks"}, a multitype point pattern (object of class \code{"ppp"}). If \code{what="domain"}, a window (object of class \code{"owin"}). If \code{what=c("marks", "domain")} (the default), a list consisting of a multitype point pattern and a window. } \references{ Allard, D. and Fraley, C. (1997) Nonparametric maximum likelihood estimation of features in spatial point processes using Voronoi tessellation. \emph{Journal of the American Statistical Association} \bold{92}, 1485--1493.
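The following sketch (not from the original file) illustrates the estimator on a synthetic pattern of clutter plus a dense feature; all parameter values are arbitrary, and \code{runifpoint} and \code{superimpose} are assumed to be available from spatstat.random and spatstat.geom respectively:

  set.seed(42)
  Y <- superimpose(runifpoint(80, square(c(0.4, 0.6))),  # dense feature
                   runifpoint(120),                      # uniform clutter
                   W=square(1))
  cs <- clusterset(Y, what="domain", exact=FALSE, dimyx=128)
  area(cs) / area(Window(Y))   # fraction of the window occupied by the estimated feature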
} \author{ \adrian and \rolf } \seealso{ \code{\link{nnclean}}, \code{\link{sharpen}} } \examples{ opa <- par(mfrow=c(1,2)) W <- grow.rectangle(as.rectangle(letterR), 1) X <- superimpose(runifpoint(300, letterR), runifpoint(50, W), W=W) plot(W, main="clusterset(X, 'm')") plot(clusterset(X, "marks", fast=TRUE), add=TRUE, chars=c(1, 3), cols=1:2) plot(letterR, add=TRUE) plot(W, main="clusterset(X, 'd')") plot(clusterset(X, "domain", exact=FALSE), add=TRUE) plot(letterR, add=TRUE) par(opa) } \keyword{spatial} \keyword{classif} spatstat.explore/man/density.splitppp.Rd0000644000176200001440000000637014643125461020201 0ustar liggesusers\name{density.splitppp} \alias{density.splitppp} \alias{density.ppplist} \title{Kernel Smoothed Intensity of Split Point Pattern} \description{ Compute a kernel smoothed intensity function for each of the components of a split point pattern, or each of the point patterns in a list. } \usage{ \method{density}{splitppp}(x, \dots, weights=NULL, se=FALSE) \method{density}{ppplist}(x, \dots, weights=NULL, se=FALSE) } \arguments{ \item{x}{ Split point pattern (object of class \code{"splitppp"} created by \code{\link[spatstat.geom]{split.ppp}}) to be smoothed. Alternatively a list of point patterns, of class \code{"ppplist"}. } \item{\dots}{ Arguments passed to \code{\link{density.ppp}} to control the smoothing, pixel resolution, edge correction etc. } \item{weights}{ Numerical weights for the points. See Details. } \item{se}{ Logical value indicating whether to compute standard errors as well. } } \value{ A list of pixel images (objects of class \code{"im"}) which can be plotted or printed; or a list of numeric vectors giving the values at specified points. If \code{se=TRUE}, the result is a list with two elements named \code{estimate} and \code{SE}, each of the format described above. } \details{ This is a method for the generic function \code{density}. The argument \code{x} should be a list of point patterns, and should belong to one of the classes \code{"ppplist"} or \code{"splitppp"}. Typically \code{x} is obtained by applying the function \code{\link[spatstat.geom]{split.ppp}} to a point pattern \code{y} by calling \code{split(y)}. This splits the points of \code{y} into several sub-patterns. A kernel estimate of the intensity function of each of the point patterns is computed using \code{\link{density.ppp}}. The return value is usually a list, each of whose entries is a pixel image (object of class \code{"im"}). The return value also belongs to the class \code{"solist"} and can be plotted or printed. If the argument \code{at="points"} is given, the result is a list of numeric vectors giving the intensity values at the data points. If \code{se=TRUE}, the result is a list with two elements named \code{estimate} and \code{SE}, each of the format described above. The argument \code{weights} specifies numerical case weights for the data points. Normally it should be a list, with the same length as \code{x}. The entry \code{weights[[i]]} will determine the case weights for the pattern \code{x[[i]]}, and may be given in any format acceptable to \code{\link{density.ppp}}. For example, \code{weights[[i]]} can be a numeric vector of length equal to \code{npoints(x[[i]])}, a single numeric value, a numeric matrix, a pixel image (object of class \code{"im"}), an \code{expression}, or a function of class \code{"funxy"}. 
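A hedged sketch of the list form of \code{weights} just described; the weight values below are arbitrary and purely illustrative:

  A <- split(amacrine)
  w <- lapply(A, function(Y) runif(npoints(Y)))   # one numeric vector per component pattern
  Zw <- density(A, sigma=0.1, weights=w)
  plot(Zw)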
For convenience, \code{weights} can also be a single \code{expression}, or a single pixel image (object of class \code{"im"}), or a single function of class \code{"funxy"}. } \seealso{ \code{\link[spatstat.geom]{ppp.object}}, \code{\link[spatstat.geom]{im.object}} } \examples{ Z <- density(split(amacrine), 0.05) plot(Z) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.explore/man/plot.envelope.Rd0000644000176200001440000000303214611073324017425 0ustar liggesusers\name{plot.envelope} \alias{plot.envelope} \title{Plot a Simulation Envelope} \description{ Plot method for the class \code{"envelope"}. } \usage{ \method{plot}{envelope}(x, \dots, main) } \arguments{ \item{x}{ An object of class \code{"envelope"}, containing the variables to be plotted or variables from which the plotting coordinates can be computed. } \item{main}{Main title for plot.} \item{\dots}{ Extra arguments passed to \code{\link{plot.fv}}. } } \value{ Either \code{NULL}, or a data frame giving the meaning of the different line types and colours. } \details{ This is the \code{plot} method for the class \code{"envelope"} of simulation envelopes. Objects of this class are created by the command \code{\link{envelope}}. This plot method is currently identical to \code{\link{plot.fv}}. Its default behaviour is to shade the region between the upper and lower envelopes in a light grey colour. To suppress the shading and plot the upper and lower envelopes as curves, set \code{shade=NULL}. To change the colour of the shading, use the argument \code{shadecol} which is passed to \code{\link{plot.fv}}. See \code{\link{plot.fv}} for further information on how to control the plot. } \examples{ E <- envelope(cells, Kest, nsim=19) plot(E) plot(E, sqrt(./pi) ~ r) } \seealso{ \code{\link{envelope}}, \code{\link{plot.fv}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{hplot} \concept{Goodness-of-fit} \concept{Test of randomness} \concept{Envelope of simulations} spatstat.explore/man/pcf.fv.Rd0000644000176200001440000001134214611073324016020 0ustar liggesusers\name{pcf.fv} \alias{pcf.fv} \title{Pair Correlation Function obtained from K Function} \description{ Estimates the pair correlation function of a point pattern, given an estimate of the K function. } \usage{ \method{pcf}{fv}(X, \dots, method="c") } \arguments{ \item{X}{ An estimate of the \eqn{K} function or one of its variants. An object of class \code{"fv"}. } \item{\dots}{ Arguments controlling the smoothing spline function \code{smooth.spline}. } \item{method}{ Letter \code{"a"}, \code{"b"}, \code{"c"} or \code{"d"} indicating the method for deriving the pair correlation function from the \code{K} function. } } \value{ A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}) representing a pair correlation function. Essentially a data frame containing (at least) the variables \item{r}{the vector of values of the argument \eqn{r} at which the pair correlation function \eqn{g(r)} has been estimated } \item{pcf}{vector of values of \eqn{g(r)} } } \details{ The pair correlation function of a stationary point process is \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. 
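A small sketch (not part of the original examples) comparing the spline-derived estimate with the direct kernel estimate of \code{pcf.ppp}; the dataset \code{redwood}, the smoothing parameter and the choice \code{method="c"} are arbitrary:

  K <- Kest(redwood)
  g.fromK  <- pcf(K, spar=0.6, method="c")   # derived from K by smoothing splines
  g.direct <- pcf(redwood)                   # direct kernel estimate
  plot(g.fromK)
  plot(g.direct, add=TRUE, lty=2)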
Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. We also apply the same definition to other variants of the classical \eqn{K} function, such as the multitype \eqn{K} functions (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}). For all these variants, the benchmark value of \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to \eqn{g(r) = 1}. This routine computes an estimate of \eqn{g(r)} from an estimate of \eqn{K(r)} or its variants, using smoothing splines to approximate the derivative. It is a method for the generic function \code{\link{pcf}} for the class \code{"fv"}. The argument \code{X} should be an estimated \eqn{K} function, given as a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). This object should be the value returned by \code{\link{Kest}}, \code{\link{Kcross}}, \code{\link{Kmulti}} or \code{\link{Kinhom}}. The smoothing spline operations are performed by \code{\link{smooth.spline}} and \code{\link{predict.smooth.spline}} from the \code{modreg} library. Four numerical methods are available: \itemize{ \item \bold{"a"} apply smoothing to \eqn{K(r)}, estimate its derivative, and plug in to the formula above; \item \bold{"b"} apply smoothing to \eqn{Y(r) = \frac{K(r)}{2 \pi r}}{Y(r) = K(r)/(2 * pi * r)} constraining \eqn{Y(0) = 0}, estimate the derivative of \eqn{Y}, and solve; \item \bold{"c"} apply smoothing to \eqn{Z(r) = \frac{K(r)}{\pi r^2}}{Y(r) = K(r)/(pi * r^2)} constraining \eqn{Z(0)=1}, estimate its derivative, and solve. \item \bold{"d"} apply smoothing to \eqn{V(r) = \sqrt{K(r)}}{V(r) = sqrt(K(r))}, estimate its derivative, and solve. } Method \code{"c"} seems to be the best at suppressing variability for small values of \eqn{r}. However it effectively constrains \eqn{g(0) = 1}. If the point pattern seems to have inhibition at small distances, you may wish to experiment with method \code{"b"} which effectively constrains \eqn{g(0)=0}. Method \code{"a"} seems comparatively unreliable. Useful arguments to control the splines include the smoothing tradeoff parameter \code{spar} and the degrees of freedom \code{df}. See \code{\link{smooth.spline}} for details. } \references{ Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{pcf}}, \code{\link{pcf.ppp}}, \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kmulti}}, \code{\link{alltypes}}, \code{\link{smooth.spline}}, \code{\link{predict.smooth.spline}} } \examples{ # univariate point pattern X <- simdat \testonly{ X <- X[seq(1,npoints(X), by=4)] } K <- Kest(X) p <- pcf.fv(K, spar=0.5, method="b") plot(p, main="pair correlation function for simdat") # indicates inhibition at distances r < 0.3 } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Jdot.Rd0000644000176200001440000001642214643125461015547 0ustar liggesusers\name{Jdot} \alias{Jdot} \title{ Multitype J Function (i-to-any) } \description{ For a multitype point pattern, estimate the multitype \eqn{J} function summarising the interpoint dependence between the type \eqn{i} points and the points of any type. 
} \usage{ Jdot(X, i, eps=NULL, r=NULL, breaks=NULL, \dots, correction=NULL) } \arguments{ \item{X}{ The observed point pattern, from which an estimate of the multitype \eqn{J} function \eqn{J_{i\bullet}(r)}{Ji.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{eps}{A positive number. The resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{J_{i\bullet}(r)}{Ji.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{J}{the recommended estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)}, currently the Kaplan-Meier estimator. } \item{r}{the values of the argument \eqn{r} at which the function \eqn{J_{i\bullet}(r)}{Ji.(r)} has been estimated } \item{km}{the Kaplan-Meier estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} } \item{han}{the Hanisch-style estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} } \item{un}{the ``uncorrected'' estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} formed by taking the ratio of uncorrected empirical estimators of \eqn{1 - G_{i\bullet}(r)}{1 - Gi.(r)} and \eqn{1 - F_{\bullet}(r)}{1 - F.(r)}, see \code{\link{Gdot}} and \code{\link{Fest}}. } \item{theo}{the theoretical value of \eqn{J_{i\bullet}(r)}{Ji.(r)} for a marked Poisson process, namely 1. } The result also has two attributes \code{"G"} and \code{"F"} which are respectively the outputs of \code{\link{Gdot}} and \code{\link{Fest}} for the point pattern. } \details{ This function \code{Jdot} and its companions \code{\link{Jcross}} and \code{\link{Jmulti}} are generalisations of the function \code{\link{Jest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level.) 
The ``type \eqn{i} to any type'' multitype \eqn{J} function of a stationary multitype point process \eqn{X} was introduced by Van lieshout and Baddeley (1999). It is defined by \deqn{J_{i\bullet}(r) = \frac{1 - G_{i\bullet}(r)}{1 - F_{\bullet}(r)}}{Ji.(r) = (1 - Gi.(r))/(1-F.(r))} where \eqn{G_{i\bullet}(r)}{Gi.(r)} is the distribution function of the distance from a type \eqn{i} point to the nearest other point of the pattern, and \eqn{F_{\bullet}(r)}{F.(r)} is the distribution function of the distance from a fixed point in space to the nearest point of the pattern. An estimate of \eqn{J_{i\bullet}(r)}{Ji.(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the pattern is a marked Poisson point process, then \eqn{J_{i\bullet}(r) \equiv 1}{Ji.(r) = 1}. If the subprocess of type \eqn{i} points is independent of the subprocess of points of all types not equal to \eqn{i}, then \eqn{J_{i\bullet}(r)}{Ji.(r)} equals \eqn{J_{ii}(r)}{Jii(r)}, the ordinary \eqn{J} function (see \code{\link{Jest}} and Van Lieshout and Baddeley (1996)) of the points of type \eqn{i}. Hence deviations from zero of the empirical estimate of \eqn{J_{i\bullet} - J_{ii}}{Ji.-Jii} may suggest dependence between types. This algorithm estimates \eqn{J_{i\bullet}(r)}{Ji.(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Jest}}, using the Kaplan-Meier and border corrections. The main work is done by \code{\link{Gmulti}} and \code{\link{Fest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{J_{i\bullet}(r)}{Ji.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Jcross}}, \code{\link{Jest}}, \code{\link{Jmulti}} } \examples{ # Lansing woods data: 6 types of trees woods <- lansing \testonly{ woods <- woods[seq(1,npoints(woods), by=30), ] } Jh. <- Jdot(woods, "hickory") plot(Jh.) # diagnostic plot for independence between hickories and other trees Jhh <- Jest(split(woods)$hickory) plot(Jhh, add=TRUE, legendpos="bottom") # synthetic example with two marks "a" and "b" \donttest{ pp <- runifpoint(30) \%mark\% factor(sample(c("a","b"), 30, replace=TRUE)) J <- Jdot(pp, "a") } } \author{ \adrian and \rolf. 
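An illustrative sketch of the independence diagnostic described above, using the standard \code{amacrine} dataset (this is an added illustration, not part of the original examples); it overlays \eqn{J_{i\bullet}}{Ji.} and the ordinary \eqn{J} function of the type \eqn{i} points:

  J.on   <- Jdot(amacrine, "on")
  J.onon <- Jest(split(amacrine)$on)
  plot(J.on, km ~ r, main="Jdot versus Jii for amacrine cells")
  plot(J.onon, km ~ r, add=TRUE, lty=2)
  # the two curves should roughly coincide if the "on" cells are
  # independent of the "off" cells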
} \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/fasp.object.Rd0000644000176200001440000000622614611073324017041 0ustar liggesusers\name{fasp.object} \alias{fasp.object} %DoNotExport \title{Function Arrays for Spatial Patterns} \description{ A class \code{"fasp"} to represent a \dQuote{matrix} of functions, amenable to plotting as a matrix of plot panels. } \details{ An object of this class is a convenient way of storing (and later plotting, editing, etc) a set of functions \eqn{f_{i,j}(r)}{f[i,j](r)} of a real argument \eqn{r}, defined for each possible pair \eqn{(i,j)} of indices \eqn{1 \le i,j \le n}{1 <= i,j <= n}. We may think of this as a matrix or array of functions \eqn{f_{i,j}}{f[i,j]}. Function arrays are particularly useful in the analysis of a multitype point pattern (a point pattern in which the points are identified as belonging to separate types). We may want to compute a summary function for the points of type \eqn{i} only, for each of the possible types \eqn{i}. This produces a \eqn{1 \times m}{1 * m} array of functions. Alternatively we may compute a summary function for each possible pair of types \eqn{(i,j)}. This produces an \eqn{m \times m}{m * m} array of functions. For multitype point patterns the command \code{\link[spatstat.explore]{alltypes}} will compute arrays of summary functions for each possible type or for each possible pair of types. The function \code{\link[spatstat.explore]{alltypes}} returns an object of class \code{"fasp"}. An object of class \code{"fasp"} is a list containing at least the following components: \describe{ \item{fns}{ A list of data frames, each representing one of the functions. } \item{which}{ A matrix representing the spatial arrangement of the functions. If \code{which[i,j] = k} then the function represented by \code{fns[[k]]} should be plotted in the panel at position \eqn{(i,j)}. If \code{which[i,j] = NA} then nothing is plotted in that position. } \item{titles}{ A list of character strings, providing suitable plotting titles for the functions. } \item{default.formulae}{ A list of default formulae for plotting each of the functions. } \item{title}{ A character string, giving a default title for the array when it is plotted. } } } \section{Functions available}{ There are methods for \code{plot}, \code{print} and \code{"["} for this class. The plot method displays the entire array of functions. The method \code{\link{[.fasp}} selects a sub-array using the natural indices \code{i,j}. The command \code{\link{eval.fasp}} can be used to apply a transformation to each function in the array, and to combine two arrays. 
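A brief sketch (an added illustration) of applying a transformation to every function in an array with \code{eval.fasp}; the choice of summary function is arbitrary:

  KK <- alltypes(amacrine, "K")
  LL <- eval.fasp(sqrt(KK/pi))   # convert each K function to an L function
  plot(LL)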
} \seealso{ \code{\link[spatstat.explore]{alltypes}}, \code{\link{plot.fasp}}, \code{\link{[.fasp}}, \code{\link{eval.fasp}} } \examples{ GG <- alltypes(amacrine, 'G') plot(GG) # select the row corresponding to cells of type "on" Gon <- GG["on", ] plot(Gon) # extract the G function for i = "on", j = "off" Gonoff <- GG["on", "off", drop=TRUE] # Fisher variance stabilising transformation GGfish <- eval.fasp(asin(sqrt(GG))) plot(GGfish) } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat.explore/man/thresholdCI.Rd0000644000176200001440000000406014611073325017046 0ustar liggesusers\name{thresholdCI} \alias{thresholdCI} \title{ Confidence Interval for Threshold of Numerical Predictor } \description{ Given a point pattern and a spatial covariate that has some predictive value for the point pattern, compute a confidence interval for the optimal value of the threshold that should be used to convert the covariate to a binary predictor. } \usage{ thresholdCI(X, Z, confidence = 0.95, nsim = 1000, parametric = FALSE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{Z}{ Spatial covariate with numerical values. Either a pixel image (object of class \code{"im"}), a distance function (object of class \code{"distfun"}) or a \code{function(x,y)} in the \R language. } \item{confidence}{ Confidence level. A number between 0 and 1. } \item{nsim}{ Number of bootstrap simulations to perform. } \item{parametric}{ Logical value specifying whether to use the parametric bootstrap. } } \details{ The spatial covariate \code{Z} is assumed to have some utility as a predictor of the point pattern \code{X}. This code computes a bootstrap confidence interval for the best threshold value \eqn{z} for converting the numerical predictor to a binary predictor, for use in techniques such as Weights of Evidence. } \value{ A matrix containing upper and lower limits for the threshold \code{z} and the corresponding upper and lower limits for the fraction of area of the study region. } \references{ Baddeley, A., Brown, W., Milne, R.K., Nair, G., Rakshit, S., Lawrence, T., Phatak, A. and Fu, S.C. (2021) Optimal thresholding of predictors in mineral prospectivity analysis. \emph{Natural Resources Research} \bold{30} 923--969. } \author{ \adrian. } \seealso{ \code{\link{thresholdSelect}} } \examples{ gold <- rescale(murchison$gold, 1000, "km") faults <- rescale(murchison$faults, 1000, "km") distfault <- distfun(faults) Nsim <- if(interactive()) 250 else 25 thresholdCI(gold, distfault, nsim=Nsim) } \keyword{spatial} \keyword{models} \keyword{nonparametric} spatstat.explore/man/Smooth.fv.Rd0000644000176200001440000000545014611073325016525 0ustar liggesusers\name{Smooth.fv} \alias{Smooth.fv} \title{ Apply Smoothing to Function Values } \description{ Applies smoothing to the values in selected columns of a function value table. } \usage{ \method{Smooth}{fv}(X, which = "*", ..., method=c("smooth.spline", "loess"), xinterval=NULL) } \arguments{ \item{X}{ Values to be smoothed. A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). } \item{which}{ Character vector identifying which columns of the table should be smoothed. Either a vector containing names of columns, or one of the wildcard strings \code{"*"} or \code{"."} explained below. } \item{\dots}{ Extra arguments passed to \code{\link[stats]{smooth.spline}} or \code{\link[stats]{loess}} to control the smoothing. } \item{method}{ Smoothing algorithm. 
A character string, partially matched to either \code{"smooth.spline"} or \code{"loess"}. } \item{xinterval}{ Optional. Numeric vector of length 2 specifying a range of \eqn{x} values. Smoothing will be performed only on the part of the function corresponding to this range. } } \details{ The command \code{Smooth.fv} applies smoothing to the function values in a function value table (object of class \code{"fv"}). \code{Smooth.fv} is a method for the generic function \code{\link{Smooth}}. The smoothing is performed either by \code{\link[stats]{smooth.spline}} or by \code{\link[stats]{loess}}. Smoothing is applied to every column (or to each of the selected columns) of function values in turn, using the function argument as the \eqn{x} coordinate and the selected column as the \eqn{y} coordinate. The original function values are then replaced by the corresponding smooth interpolated function values. The optional argument \code{which} specifies which of the columns of function values in \code{x} will be smoothed. The default (indicated by the wildcard \code{which="*"}) is to smooth all function values, i.e.\ all columns except the function argument. Alternatively \code{which="."} designates the subset of function values that are displayed in the default plot. Alternatively \code{which} can be a character vector containing the names of columns of \code{x}. If the argument \code{xinterval} is given, then smoothing will be performed only in the specified range of \eqn{x} values. } \value{ Another function value table (object of class \code{"fv"}) of the same format. } \author{\adrian and \rolf } \seealso{ \code{\link{Smooth}}, \code{\link{with.fv}}, \code{\link{fv.object}}, \code{\link[stats]{smooth.spline}}, \code{\link[stats]{smooth.spline}} } \examples{ G <- Gest(cells) plot(G) plot(Smooth(G, df=9), add=TRUE) } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/F3est.Rd0000644000176200001440000001300114643125461015621 0ustar liggesusers\name{F3est} \Rdversion{1.1} \alias{F3est} \title{ Empty Space Function of a Three-Dimensional Point Pattern } \description{ Estimates the empty space function \eqn{F_3(r)}{F3(r)} from a three-dimensional point pattern. } \usage{ F3est(X, ..., rmax = NULL, nrval = 128, vside = NULL, correction = c("rs", "km", "cs"), sphere = c("fudge", "ideal", "digital")) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{F_3(r)}{F3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{F_3(r)}{F3(r)} will be estimated. A large value of \code{nrval} is required to avoid discretisation effects. } \item{vside}{ Optional. Side length of the voxels in the discrete approximation. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } \item{sphere}{ Optional. Character string specifying how to calculate the theoretical value of \eqn{F_3(r)}{F3(r)} for a Poisson process. See Details. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the empty space function is \deqn{ F_3(r) = P(d(0,\Phi) \le r) }{ F3(r) = P(d(0,Phi) <= r) } where \eqn{d(0,\Phi)}{d(0,Phi)} denotes the distance from a fixed origin \eqn{0} to the nearest point of \eqn{\Phi}{Phi}. The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. 
The empty space function of \eqn{\Phi}{Phi} can then be estimated using techniques described in the References. The box containing the point pattern is discretised into cubic voxels of side length \code{vside}. The distance function \eqn{d(u,\Phi)}{d(u,Phi)} is computed for every voxel centre point \eqn{u} using a three-dimensional version of the distance transform algorithm (Borgefors, 1986). The empirical cumulative distribution function of these values, with appropriate edge corrections, is the estimate of \eqn{F_3(r)}{F3(r)}. The available edge corrections are: \describe{ \item{\code{"rs"}:}{ the reduced sample (aka minus sampling, border correction) estimator (Baddeley et al, 1993) } \item{\code{"km"}:}{ the three-dimensional version of the Kaplan-Meier estimator (Baddeley and Gill, 1997) } \item{\code{"cs"}:}{ the three-dimensional generalisation of the Chiu-Stoyan or Hanisch estimator (Chiu and Stoyan, 1998). } } Alternatively \code{correction="all"} selects all options. The result includes a column \code{theo} giving the theoretical value of \eqn{F_3(r)}{F3(r)} for a uniform Poisson process (Complete Spatial Randomness). This value depends on the volume of the sphere of radius \code{r} measured in the discretised distance metric. The argument \code{sphere} determines how this will be calculated. \itemize{ \item If \code{sphere="ideal"} the calculation will use the volume of an ideal sphere of radius \eqn{r} namely \eqn{(4/3) \pi r^3}{(4/3) * pi * r^3}. This is not recommended because the theoretical values of \eqn{F_3(r)}{F3(r)} are inaccurate. \item If \code{sphere="fudge"} then the volume of the ideal sphere will be multiplied by 0.78, which gives the approximate volume of the sphere in the discretised distance metric. \item If \code{sphere="digital"} then the volume of the sphere in the discretised distance metric is computed exactly using another distance transform. This takes longer to compute, but is exact. } } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42} (1993) 641--668. Baddeley, A.J. and Gill, R.D. (1997) Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25}, 263--292. Borgefors, G. (1986) Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34}, 344--371. Chiu, S.N. and Stoyan, D. (1998) Estimators of distance distributions for spatial patterns. \emph{Statistica Neerlandica} \bold{52}, 239--246. } \author{ \adrian and Rana Moyeed. } \section{Warnings}{ A small value of \code{vside} and a large value of \code{nrval} are required for reasonable accuracy. The default value of \code{vside} ensures that the total number of voxels is \code{2^22} or about 4 million. To change the default number of voxels, see \code{\link[spatstat.geom]{spatstat.options}("nvoxel")}. } \seealso{ \code{\link[spatstat.geom]{pp3}} to create a three-dimensional point pattern (object of class \code{"pp3"}). \code{\link{G3est}}, \code{\link{K3est}}, \code{\link{pcf3est}} for other summary functions of a three-dimensional point pattern. \code{\link{Fest}} to estimate the empty space function of point patterns in two dimensions. 
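A minimal sketch, assuming \code{rpoispp3} from spatstat.random is available; the intensity and the correction are chosen arbitrarily:

  X3 <- rpoispp3(80)                 # Poisson pattern in the unit box
  F3 <- F3est(X3, correction="km")
  if(interactive()) plot(F3)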
} \examples{
\testonly{op <- spatstat.options(nvoxel=2^18)}
X <- rpoispp3(42)
Z <- F3est(X)
if(interactive()) plot(Z)
\testonly{spatstat.options(op)}
} \keyword{spatial} \keyword{nonparametric} \concept{Three-dimensional} spatstat.explore/man/panel.contour.Rd0000644000176200001440000000454614650323373017440 0ustar liggesusers\name{panel.contour} \alias{panel.contour} \alias{panel.image} \alias{panel.histogram} \title{ Panel Plots using Colour Image or Contour Lines } \description{ These functions can be passed to \code{\link[graphics]{pairs}} or \code{\link[graphics]{coplot}} to determine what kind of plotting is done in each panel of a multi-panel graphical display. } \usage{
panel.contour(x, y, ..., sigma = NULL)
panel.image(x, y, ..., sigma = NULL)
panel.histogram(x, ...)
} \arguments{ \item{x,y}{ Coordinates of points in a scatterplot. } \item{\dots}{ Extra graphics arguments, passed to \code{\link[spatstat.geom]{contour.im}}, \code{\link[spatstat.geom]{plot.im}} or \code{\link[graphics]{rect}}, respectively, to control the appearance of the panel. } \item{sigma}{ Bandwidth of kernel smoother, on a scale where \eqn{x} and \eqn{y} range between 0 and 1. } } \details{ These functions can serve as one of the arguments \code{panel}, \code{lower.panel}, \code{upper.panel}, \code{diag.panel} passed to graphics commands like \code{\link[graphics]{pairs}} or \code{\link[graphics]{coplot}}, to determine what kind of plotting is done in each panel of a multi-panel graphical display. In particular they work with \code{\link[spatstat.explore]{pairs.im}}. The functions \code{panel.contour} and \code{panel.image} are suitable for the off-diagonal plots which involve two datasets \code{x} and \code{y}. They first rescale \code{x} and \code{y} to the unit square, then apply kernel smoothing with bandwidth \code{sigma} using \code{\link[spatstat.explore]{density.ppp}}. Then \code{panel.contour} draws a contour plot while \code{panel.image} draws a colour image. The function \code{panel.histogram} is suitable for the diagonal plots which involve a single dataset \code{x}. It displays a histogram of the data. } \value{ Null. } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.explore]{pairs.im}}, \code{\link{pairs.default}}, \code{\link{panel.smooth}} } \examples{
pairs(bei.extra, panel = panel.contour, diag.panel = panel.histogram)
with(bei.extra, pairs(grad, elev, panel = panel.image, diag.panel = panel.histogram))
pairs(marks(finpines), panel=panel.contour, diag.panel=panel.histogram)
} \keyword{spatial} \keyword{hplot} spatstat.explore/man/Kdot.inhom.Rd0000644000176200001440000003006514643125461016660 0ustar liggesusers\name{Kdot.inhom} \alias{Kdot.inhom} \title{ Inhomogeneous Multitype K Dot Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the dot \eqn{K} function, which counts the expected number of points of any type within a given distance of a point of type \eqn{i}, adjusted for spatially varying intensity. } \usage{ Kdot.inhom(X, i, lambdaI=NULL, lambdadot=NULL, \dots, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIdot=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous dot type \eqn{K} function \eqn{K_{i\bullet}(r)}{Ki.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details.
} \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process of points of type \code{i}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{i} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdadot}{ Optional. Values of the estimated intensity of the entire point process, Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Ignored. } \item{r}{ Optional. Numeric vector giving the values of the argument \eqn{r} at which the dot K function \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdadot} if they are omitted. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdadot} if they are omitted. Incompatible with \code{sigma}. } \item{lambdaIdot}{ Optional. A matrix containing estimates of the product of the intensities \code{lambdaI} and \code{lambdadot} for each pair of points, the first point of type \code{i} and the second of any type. } \item{lambdaX}{ Optional. Values of the intensity for all points of \code{X}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If present, this argument overrides both \code{lambdaI} and \code{lambdadot}. } \item{update}{ Logical value indicating what to do when \code{lambdaI}, \code{lambdadot} or \code{lambdaX} is a fitted point process model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link[spatstat.model]{update.ppm}} or \code{\link[spatstat.model]{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. 
} \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link[spatstat.model]{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{i\bullet}(r)}{Ki.(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{i\bullet}(r)}{Ki.(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{i\bullet}(r)}{Ki.(r)} obtained by the edge corrections named. } \details{ This is a generalisation of the function \code{\link{Kdot}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Kinhom}}. Briefly, given a multitype point process, consider the points without their types, and suppose this unmarked point process has intensity function \eqn{\lambda(u)}{lambda(u)} at spatial locations \eqn{u}. Suppose we place a mass of \eqn{1/\lambda(\zeta)}{1/lambda(z)} at each point \eqn{\zeta}{z} of the process. Then the expected total mass per unit area is 1. The inhomogeneous ``dot-type'' \eqn{K} function \eqn{K_{i\bullet}^{\mbox{inhom}}(r)}{K[i.]inhom(r)} equals the expected total mass within a radius \eqn{r} of a point of the process of type \eqn{i}, discounting this point itself. If the process of type \eqn{i} points were independent of the points of other types, then \eqn{K_{i\bullet}^{\mbox{inhom}}(r)}{K[i.]inhom(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{i\bullet}}{Ki.} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} suggest dependence between the points of types \eqn{i} and \eqn{j} for \eqn{j\neq i}{j != i}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). If \code{i} is missing, it defaults to the first level of the marks factor, \code{i = levels(X$marks)[1]}. The argument \code{lambdaI} supplies the values of the intensity of the sub-process of points of type \code{i}. It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the type \code{i} intensity at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the type \code{i} intensity evaluated only at the data points of type \code{i}. The length of this vector must equal the number of type \code{i} points in \code{X}. } \item{a function}{ of the form \code{function(x,y)} which can be evaluated to give values of the intensity at any locations. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) 
} \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate of \code{lambdaI} for a given point is computed by removing the point from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point in question. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Similarly the argument \code{lambdadot} should contain estimated values of the intensity of the entire point process. It may be either a pixel image, a numeric vector of length equal to the number of points in \code{X}, a function, or omitted. Alternatively if the argument \code{lambdaX} is given, then it specifies the intensity values for all points of \code{X}, and the arguments \code{lambdaI}, \code{lambdadot} will be ignored. (The two arguments \code{lambdaI}, \code{lambdadot} allow the user to specify two different methods for calculating the intensities of the two kinds of points, while \code{lambdaX} ensures that the same method is used for both kinds of points.) For advanced use only, the optional argument \code{lambdaIdot} is a matrix containing estimated values of the products of these two intensities for each pair of points, the first point of type \code{i} and the second of any type. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. The argument \code{correction} chooses the edge correction as explained e.g. in \code{\link{Kest}}. The pair correlation function can also be applied to the result of \code{Kdot.inhom}; see \code{\link{pcf}}. } \references{ \Moller, J. and Waagepetersen, R. Statistical Inference and Simulation for Spatial Point Processes Chapman and Hall/CRC Boca Raton, 2003. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. 
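A short sketch (not part of the original examples) using the leave-one-out kernel shortcut via \code{sigma}, followed by a pair correlation function derived from the result as mentioned above; the bandwidth and spline parameters are arbitrary:

  Kd <- Kdot.inhom(amacrine, "on", sigma=0.12)
  gd <- pcf(Kd, spar=0.6, method="c")   # pair correlation derived from the dot-K estimate
  plot(gd)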
} \seealso{ \code{\link{Kdot}}, \code{\link{Kinhom}}, \code{\link{Kcross.inhom}}, \code{\link{Kmulti.inhom}}, \code{\link{pcf}} } \examples{ # Lansing Woods data woods <- lansing woods <- woods[seq(1,npoints(woods), by=10)] ma <- split(woods)$maple lg <- unmark(woods) # Estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdadot <- density.ppp(lg, sigma=0.15, at="points") K <- Kdot.inhom(woods, "maple", lambdaI=lambdaM, lambdadot=lambdadot) # Equivalent K <- Kdot.inhom(woods, "maple", sigma=0.15) # Fit model if(require("spatstat.model")) { fit <- ppm(woods ~ marks * polynom(x,y,2)) K <- Kdot.inhom(woods, "maple", lambdaX=fit, update=FALSE, leaveoneout=FALSE) } # synthetic example: type A points have intensity 50, # type B points have intensity 50 + 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) lamdot <- as.im(function(x,y) { 100 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) K <- Kdot.inhom(X, "B", lambdaI=lamB, lambdadot=lamdot) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Extract.fv.Rd0000644000176200001440000000613614611073324016667 0ustar liggesusers\name{Extract.fv} \alias{[.fv} \alias{[<-.fv} \alias{$<-.fv} \title{Extract or Replace Subset of Function Values} \description{ Extract or replace a subset of an object of class \code{"fv"}. } \usage{ \method{[}{fv}(x, i, j, \dots, drop=FALSE) \method{[}{fv}(x, i, j) <- value \method{$}{fv}(x, name) <- value } \arguments{ \item{x}{ a function value object, of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame. } \item{i}{ any appropriate subset index. Selects a subset of the rows of the data frame, i.e. a subset of the domain of the function(s) represented by \code{x}. } \item{j}{ any appropriate subset index for the columns of the data frame. Selects some of the functions present in \code{x}. } \item{name}{ the name of a column of the data frame. } \item{\dots}{ Ignored. } \item{drop}{ Logical. If \code{TRUE}, the result is a data frame or vector containing the selected rows and columns of data. If \code{FALSE} (the default), the result is another object of class \code{"fv"}. } \item{value}{ Replacement value for the column or columns selected by \code{name} or \code{j}. } } \value{ The result of \code{[.fv} with \code{drop=TRUE} is a data frame or vector. Otherwise, the result is another object of class \code{"fv"}. } \details{ These functions extract a designated subset of an object of class \code{"fv"}, or replace the designated subset with other data, or delete the designated subset. The subset is specified by the row index \code{i} and column index \code{j}, or by the column name \code{name}. Either \code{i} or \code{j} may be missing, or both may be missing. The function \code{[.fv} is a method for the generic operator \code{\link{[}} for the class \code{"fv"}. It extracts the designated subset of \code{x}, and returns it as another object of class \code{"fv"} (if \code{drop=FALSE}) or as a data frame or vector (if \code{drop=TRUE}). The function \code{[<-.fv} is a method for the generic operator \code{\link{[<-}} for the class \code{"fv"}. If \code{value} is \code{NULL}, the designated subset of \code{x} will be deleted from \code{x}. Otherwise, the designated subset of \code{x} will be replaced by the data contained in \code{value}. The return value is the modified object \code{x}. 
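A compact sketch (an added illustration) combining row and column subsetting in a single call; the distance cutoff and column names are arbitrary, with \code{"iso"} and \code{"theo"} being standard columns of \code{Kest} output:

  K <- Kest(cells)
  Ksub <- K[K$r <= 0.15, c("r", "iso", "theo")]   # still an object of class "fv"
  plot(Ksub)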
The function \code{$<-.fv} is a method for the generic operator \code{\link{$<-}} for the class \code{"fv"}. If \code{value} is \code{NULL}, the designated column of \code{x} will be deleted from \code{x}. Otherwise, the designated column of \code{x} will be replaced by the data contained in \code{value}. The return value is the modified object \code{x}. } \seealso{ \code{\link{fv.object}} } \examples{ K <- Kest(cells) # discard the estimates of K(r) for r > 0.1 Ksub <- K[K$r <= 0.1, ] # extract the border method estimates bor <- K[ , "border", drop=TRUE] # or equivalently bor <- K$border # remove the border-method estimates K$border <- NULL K } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat.explore/man/quadrat.test.Rd0000644000176200001440000002446414650323373017273 0ustar liggesusers\name{quadrat.test} \alias{quadrat.test} \alias{quadrat.test.ppp} \alias{quadrat.test.quadratcount} \title{Dispersion Test for Spatial Point Pattern Based on Quadrat Counts} \description{ Performs a test of Complete Spatial Randomness for a given point pattern, based on quadrat counts. Alternatively performs a goodness-of-fit test of a fitted inhomogeneous Poisson model. By default performs chi-squared tests; can also perform Monte Carlo based tests. } \usage{ quadrat.test(X, ...) \method{quadrat.test}{ppp}(X, nx=5, ny=nx, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, lambda=NULL, df.est=NULL, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) \method{quadrat.test}{quadratcount}(X, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, CR=1, lambda=NULL, df.est=NULL, ..., nsim=1999) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) to be subjected to the goodness-of-fit test. Alternatively a fitted point process model (object of class \code{"ppm"} or \code{"slrm"}) to be tested. Alternatively \code{X} can be the result of applying \code{\link[spatstat.geom]{quadratcount}} to a point pattern. } \item{nx,ny}{ Numbers of quadrats in the \eqn{x} and \eqn{y} directions. Incompatible with \code{xbreaks} and \code{ybreaks}. } \item{alternative}{ Character string (partially matched) specifying the alternative hypothesis. } \item{method}{ Character string (partially matched) specifying the test to use: either \code{method="Chisq"} for the chi-squared test (the default), or \code{method="MonteCarlo"} for a Monte Carlo test. } \item{conditional}{ Logical. Should the Monte Carlo test be conducted conditionally upon the observed number of points of the pattern? Ignored if \code{method="Chisq"}. } \item{CR}{ Optional. Numerical value. The exponent for the Cressie-Read test statistic. See Details. } \item{lambda}{ Optional. Pixel image (object of class \code{"im"}) or function (class \code{"funxy"}) giving the predicted intensity of the point process. } \item{df.est}{ Optional. Advanced use only. The number of fitted parameters, or the number of degrees of freedom lost by estimation of parameters. } \item{\dots}{Ignored.} \item{xbreaks}{ Optional. Numeric vector giving the \eqn{x} coordinates of the boundaries of the quadrats. Incompatible with \code{nx}. } \item{ybreaks}{ Optional. Numeric vector giving the \eqn{y} coordinates of the boundaries of the quadrats. Incompatible with \code{ny}. } \item{tess}{ Tessellation (object of class \code{"tess"} or something acceptable to \code{\link[spatstat.geom]{as.tess}}) determining the quadrats. 
Incompatible with \code{nx, ny, xbreaks, ybreaks}. } \item{nsim}{ The number of simulated samples to generate when \code{method="MonteCarlo"}. } } \details{ These functions perform \eqn{\chi^2}{chi^2} tests or Monte Carlo tests of goodness-of-fit for a point process model, based on quadrat counts. The function \code{quadrat.test} is generic, with methods for point patterns (class \code{"ppp"}), split point patterns (class \code{"splitppp"}), point process models (class \code{"ppm"} or \code{"slrm"}) and quadrat count tables (class \code{"quadratcount"}). \itemize{ \item if \code{X} is a point pattern, we test the null hypothesis that the data pattern is a realisation of Complete Spatial Randomness (the uniform Poisson point process). Marks in the point pattern are ignored. (If \code{lambda} is given then the null hypothesis is the Poisson process with intensity \code{lambda}.) \item if \code{X} is a split point pattern, then for each of the component point patterns (taken separately) we test the null hypotheses of Complete Spatial Randomness. See \code{\link[spatstat.explore]{quadrat.test.splitppp}} for documentation. \item If \code{X} is a fitted point process model, then it should be a Poisson point process model. The data to which this model was fitted are extracted from the model object, and are treated as the data point pattern for the test. We test the null hypothesis that the data pattern is a realisation of the (inhomogeneous) Poisson point process specified by \code{X}. } In all cases, the window of observation is divided into tiles, and the number of data points in each tile is counted, as described in \code{\link[spatstat.geom]{quadratcount}}. The quadrats are rectangular by default, or may be regions of arbitrary shape specified by the argument \code{tess}. The expected number of points in each quadrat is also calculated, as determined by CSR (in the first case) or by the fitted model (in the second case). Then the Pearson \eqn{X^2} statistic \deqn{ X^2 = sum((observed - expected)^2/expected) } is computed. If \code{method="Chisq"} then a \eqn{\chi^2}{chi^2} test of goodness-of-fit is performed by comparing the test statistic to the \eqn{\chi^2}{chi^2} distribution with \eqn{m-k} degrees of freedom, where \code{m} is the number of quadrats and \eqn{k} is the number of fitted parameters (equal to 1 for \code{quadrat.test.ppp}). The default is to compute the \emph{two-sided} \eqn{p}-value, so that the test will be declared significant if \eqn{X^2} is either very large or very small. One-sided \eqn{p}-values can be obtained by specifying the \code{alternative}. An important requirement of the \eqn{\chi^2}{chi^2} test is that the expected counts in each quadrat be greater than 5. If \code{method="MonteCarlo"} then a Monte Carlo test is performed, obviating the need for all expected counts to be at least 5. In the Monte Carlo test, \code{nsim} random point patterns are generated from the null hypothesis (either CSR or the fitted point process model). The Pearson \eqn{X^2} statistic is computed as above. The \eqn{p}-value is determined by comparing the \eqn{X^2} statistic for the observed point pattern, with the values obtained from the simulations. Again the default is to compute the \emph{two-sided} \eqn{p}-value. 
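As an informal sketch of this calculation (not the exact code used internally), if \code{Tobs} denotes the observed Pearson statistic and \code{Tsim} the vector of statistics computed from the \code{nsim} simulated patterns, then a one-sided Monte Carlo \eqn{p}-value has the form
\preformatted{
  p.upper <- (1 + sum(Tsim >= Tobs)) / (nsim + 1)
}
and the two-sided \eqn{p}-value is obtained by combining the upper and lower tail probabilities in an analogous way.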
If \code{conditional} is \code{TRUE} then the simulated samples are generated from the multinomial distribution with the number of \dQuote{trials} equal to the number of observed points and the vector of probabilities equal to the expected counts divided by the sum of the expected counts. Otherwise the simulated samples are independent Poisson counts, with means equal to the expected counts. If the argument \code{CR} is given, then instead of the Pearson \eqn{X^2} statistic, the Cressie-Read (1984) power divergence test statistic \deqn{ 2nI = \frac{2}{CR(CR+1)} \sum_i \left[ \left( \frac{X_i}{E_i} \right)^CR - 1 \right] }{ 2nI = (2/(CR * (CR+1))) * sum((X[i]/E[i])^CR - 1) } is computed, where \eqn{X_i}{X[i]} is the \eqn{i}th observed count and \eqn{E_i}{E[i]} is the corresponding expected count. The value \code{CR=1} gives the Pearson \eqn{X^2} statistic; \code{CR=0} gives the likelihood ratio test statistic \eqn{G^2}; \code{CR=-1/2} gives the Freeman-Tukey statistic \eqn{T^2}; \code{CR=-1} gives the modified likelihood ratio test statistic \eqn{GM^2}; and \code{CR=-2} gives Neyman's modified statistic \eqn{NM^2}. In all cases the asymptotic distribution of this test statistic is the same \eqn{\chi^2}{chi^2} distribution as above. The return value is an object of class \code{"htest"}. Printing the object gives comprehensible output about the outcome of the test. The return value also belongs to the special class \code{"quadrat.test"}. Plotting the object will display the quadrats, annotated by their observed and expected counts and the Pearson residuals. See the examples. } \seealso{ \code{\link[spatstat.explore]{quadrat.test.splitppp}}, \code{\link[spatstat.geom]{quadratcount}}, \code{\link[spatstat.geom]{quadrats}}, \code{\link[spatstat.random]{quadratresample}}, \code{\link{chisq.test}}, \code{\link[spatstat.explore]{cdf.test}}. To test a Poisson point process model against a specific alternative, use \code{\link[spatstat.model]{anova.ppm}}. } \value{ An object of class \code{"htest"}. See \code{\link[stats]{chisq.test}} for explanation. The return value is also an object of the special class \code{"quadrattest"}, and there is a plot method for this class. See the examples. } \references{ Cressie, N. and Read, T.R.C. (1984) Multinomial goodness-of-fit tests. \emph{Journal of the Royal Statistical Society, Series B} \bold{46}, 440--464. } \examples{ quadrat.test(simdat) quadrat.test(simdat, 4, 3) quadrat.test(simdat, alternative="regular") quadrat.test(simdat, alternative="clustered") ## Likelihood ratio test quadrat.test(simdat, CR=0) ## Power divergence tests quadrat.test(simdat, CR=-1)$p.value quadrat.test(simdat, CR=-2)$p.value # Using Monte Carlo p-values quadrat.test(swedishpines) # Get warning, small expected values. 
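# Monte Carlo tests (below) avoid the requirement that all expected
# counts be at least 5; a small nsim is used only when the examples
# are run non-interactively.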
Nsim <- if(interactive()) 4999 else 9 quadrat.test(swedishpines, method="M", nsim=Nsim) quadrat.test(swedishpines, method="M", nsim=Nsim, conditional=FALSE) # quadrat counts qS <- quadratcount(simdat, 4, 3) quadrat.test(qS) te <- quadrat.test(simdat, 4) residuals(te) # Pearson residuals plot(te) plot(simdat, pch="+", cols="green", lwd=2) plot(te, add=TRUE, col="red", cex=1.4, lty=2, lwd=3) sublab <- eval(substitute(expression(p[chi^2]==z), list(z=signif(te$p.value,3)))) title(sub=sublab, cex.sub=3) # quadrats of irregular shape B <- dirichlet(runifpoint(6, Window(simdat))) qB <- quadrat.test(simdat, tess=B) plot(simdat, main="quadrat.test(simdat, tess=B)", pch="+") plot(qB, add=TRUE, col="red", lwd=2, cex=1.2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \concept{Test of randomness} \concept{Test of clustering} spatstat.explore/man/Kcross.inhom.Rd0000644000176200001440000003155614643125461017231 0ustar liggesusers\name{Kcross.inhom} \alias{Kcross.inhom} \title{ Inhomogeneous Cross K Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the cross \eqn{K} function, which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}, adjusted for spatially varying intensity. } \usage{ Kcross.inhom(X, i, j, lambdaI=NULL, lambdaJ=NULL, \dots, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIJ=NULL, lambdaX=NULL, update=TRUE, leaveoneout=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process of points of type \code{i}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{i} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. Values of the estimated intensity of the sub-process of points of type \code{j}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{j} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{r}{ Optional. Numeric vector giving the values of the argument \eqn{r} at which the cross K function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for advanced use only.
} \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"} ,\code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{ Ignored. } \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdaJ} if they are omitted. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdaJ} if they are omitted. Incompatible with \code{sigma}. } \item{lambdaIJ}{ Optional. A matrix containing estimates of the product of the intensities \code{lambdaI} and \code{lambdaJ} for each pair of points of types \code{i} and \code{j} respectively. } \item{lambdaX}{ Optional. Values of the intensity for all points of \code{X}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If present, this argument overrides both \code{lambdaI} and \code{lambdaJ}. } \item{update}{ Logical value indicating what to do when \code{lambdaI}, \code{lambdaJ} or \code{lambdaX} is a fitted point process model (class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link[spatstat.model]{update.ppm}} or \code{\link[spatstat.model]{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{density.ppp}} or \code{\link[spatstat.model]{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{ij}(r)}{Kij(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{ij}(r)}{Kij(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{ij}(r)}{Kij(r)} obtained by the edge corrections named. } \details{ This is a generalisation of the function \code{\link{Kcross}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Kinhom}}. The inhomogeneous cross-type \eqn{K} function is described by \Moller and Waagepetersen (2003, pages 48-49 and 51-53). Briefly, given a multitype point process, suppose the sub-process of points of type \eqn{j} has intensity function \eqn{\lambda_j(u)}{lambda[j](u)} at spatial locations \eqn{u}. Suppose we place a mass of \eqn{1/\lambda_j(\zeta)}{1/lambda[j](z)} at each point \eqn{\zeta}{z} of type \eqn{j}. Then the expected total mass per unit area is 1. 
The inhomogeneous ``cross-type'' \eqn{K} function \eqn{K_{ij}^{\mbox{inhom}}(r)}{K[ij]inhom(r)} equals the expected total mass within a radius \eqn{r} of a point of the process of type \eqn{i}. If the process of type \eqn{i} points were independent of the process of type \eqn{j} points, then \eqn{K_{ij}^{\mbox{inhom}}(r)}{K[ij]inhom(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{ij}}{Kij} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} suggest dependence between the points of types \eqn{i} and \eqn{j}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. The argument \code{lambdaI} supplies the values of the intensity of the sub-process of points of type \code{i}. It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the type \code{i} intensity at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the type \code{i} intensity evaluated only at the data points of type \code{i}. The length of this vector must equal the number of type \code{i} points in \code{X}. } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, \Moller and Waagepetersen (2000). The estimate of \code{lambdaI} for a given point is computed by removing the point from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point in question. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Similarly \code{lambdaJ} should contain estimated values of the intensity of the sub-process of points of type \code{j}. It may be either a pixel image, a function, a numeric vector, or omitted. Alternatively if the argument \code{lambdaX} is given, then it specifies the intensity values for all points of \code{X}, and the arguments \code{lambdaI}, \code{lambdaJ} will be ignored. The optional argument \code{lambdaIJ} is for advanced use only. It is a matrix containing estimated values of the products of these two intensities for each pair of data points of types \code{i} and \code{j} respectively. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. 
The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. The argument \code{correction} chooses the edge correction as explained e.g. in \code{\link{Kest}}. The pair correlation function can also be applied to the result of \code{Kcross.inhom}; see \code{\link{pcf}}. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. \Moller, J. and Waagepetersen, R. Statistical Inference and Simulation for Spatial Point Processes Chapman and Hall/CRC Boca Raton, 2003. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Kcross}}, \code{\link{Kinhom}}, \code{\link{Kdot.inhom}}, \code{\link{Kmulti.inhom}}, \code{\link{pcf}} } \examples{ # Lansing Woods data woods <- lansing \testonly{woods <- woods[seq(1,npoints(woods), by=10)]} ma <- split(woods)$maple wh <- split(woods)$whiteoak # method (1): estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdaW <- density.ppp(wh, sigma=0.15, at="points") K <- Kcross.inhom(woods, "whiteoak", "maple", lambdaW, lambdaM) # method (2): leave-one-out K <- Kcross.inhom(woods, "whiteoak", "maple", sigma=0.15) # method (3): fit parametric intensity model if(require("spatstat.model")) { fit <- ppm(woods ~marks * polynom(x,y,2)) # alternative (a): use fitted model as 'lambda' argument online <- interactive() K <- Kcross.inhom(woods, "whiteoak", "maple", lambdaI=fit, lambdaJ=fit, update=online, leaveoneout=online) K <- Kcross.inhom(woods, "whiteoak", "maple", lambdaX=fit, update=online, leaveoneout=online) # alternative (b): evaluate fitted intensities at data points # (these are the intensities of the sub-processes of each type) inten <- fitted(fit, dataonly=TRUE, leaveoneout=FALSE) # split according to types of points lambda <- split(inten, marks(woods)) K <- Kcross.inhom(woods, "whiteoak", "maple", lambda$whiteoak, lambda$maple) } # synthetic example: type A points have intensity 50, # type B points have intensity 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) K <- Kcross.inhom(X, "A", "B", lambdaI=as.im(50, Window(X)), lambdaJ=lamB) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/plot.ssf.Rd0000644000176200001440000000534214611073324016411 0ustar liggesusers\name{plot.ssf} \alias{plot.ssf} \alias{image.ssf} \alias{contour.ssf} \title{ Plot a Spatially Sampled Function } \description{ Plot a spatially sampled function object. } \usage{ \method{plot}{ssf}(x, \dots, how = c("smoothed", "nearest", "points"), style = c("image", "contour", "imagecontour"), sigma = NULL, contourargs=list()) \method{image}{ssf}(x, \dots) \method{contour}{ssf}(x, ..., main, sigma = NULL) } \arguments{ \item{x}{ Spatially sampled function (object of class \code{"ssf"}). } \item{\dots}{ Arguments passed to \code{\link[graphics]{image.default}} or \code{\link[spatstat.geom]{plot.ppp}} to control the plot. 
} \item{how}{ Character string determining whether to display the function values at the data points (\code{how="points"}), a smoothed interpolation of the function (\code{how="smoothed"}), or the function value at the nearest data point (\code{how="nearest"}). } \item{style}{ Character string indicating whether to plot the smoothed function as a colour image, a contour map, or both. } \item{contourargs}{ Arguments passed to \code{\link[graphics]{contour.default}} to control the contours, if \code{style="contour"} or \code{style="imagecontour"}. } \item{sigma}{ Smoothing bandwidth for smooth interpolation. } \item{main}{ Optional main title for the plot. } } \details{ These are methods for the generic \code{\link[graphics]{plot}}, \code{\link[graphics]{image}} and \code{\link[graphics]{contour}} for the class \code{"ssf"}. An object of class \code{"ssf"} represents a function (real- or vector-valued) that has been sampled at a finite set of points. For \code{plot.ssf} there are three types of display. If \code{how="points"} the exact function values will be displayed as circles centred at the locations where they were computed. If \code{how="smoothed"} (the default) these values will be kernel-smoothed using \code{\link{Smooth.ppp}} and displayed as a pixel image. If \code{how="nearest"} the values will be interpolated by nearest neighbour interpolation using \code{\link[spatstat.geom]{nnmark}} and displayed as a pixel image. For \code{image.ssf} and \code{contour.ssf} the values are kernel-smoothed before being displayed. } \value{ \code{NULL}. } \references{ Baddeley, A. (2017) Local composite likelihood for spatial point processes. \emph{Spatial Statistics} \bold{22}, 261--295. \baddrubaturnbook } \author{ \adrian. } \seealso{ \code{\link{ssf}} } \examples{ a <- ssf(cells, nndist(cells, k=1:3)) plot(a, how="points") plot(a, how="smoothed") plot(a, how="nearest") } \keyword{spatial} \keyword{hplot} spatstat.explore/man/plot.fv.Rd0000644000176200001440000002577414643125462016252 0ustar liggesusers\name{plot.fv} \alias{plot.fv} \title{Plot Function Values} \description{ Plot method for the class \code{"fv"}. } \usage{ \method{plot}{fv}(x, fmla, \dots, subset=NULL, lty=NULL, col=NULL, lwd=NULL, xlim=NULL, ylim=NULL, xlab=NULL, ylab=NULL, clip.xlim=TRUE, ylim.covers=NULL, legend=!add, legendpos="topleft", legendavoid=missing(legendpos), legendmath=TRUE, legendargs=list(), shade=fvnames(x, ".s"), shadecol="grey", add=FALSE, log="", mathfont=c("italic", "plain", "bold", "bolditalic"), limitsonly=FALSE) } \arguments{ \item{x}{ An object of class \code{"fv"}, containing the variables to be plotted or variables from which the plotting coordinates can be computed. } \item{fmla}{ an R language formula determining which variables or expressions are plotted. Either a formula object, or a string that can be parsed as a formula. See Details. } \item{subset}{ (optional) subset of rows of the data frame that will be plotted. } \item{lty}{ (optional) numeric vector of values of the graphical parameter \code{lty} controlling the line style of each plot. } \item{col}{ (optional) numeric vector of values of the graphical parameter \code{col} controlling the colour of each plot. } \item{lwd}{ (optional) numeric vector of values of the graphical parameter \code{lwd} controlling the line width of each plot. 
} \item{xlim}{ (optional) range of x axis } \item{ylim}{ (optional) range of y axis } \item{xlab}{ (optional) label for x axis } \item{ylab}{ (optional) label for y axis } \item{\dots}{ Extra arguments passed to \code{plot.default}. } \item{clip.xlim}{ Logical value specifying whether the range of the horizontal axis \code{xlim} should be automatically restricted to a subset of the range of the available data. See the section on \bold{Controlling the horizontal axis limits} below. } \item{ylim.covers}{ Optional vector of \eqn{y} values that must be included in the \eqn{y} axis. For example \code{ylim.covers=0} will ensure that the \eqn{y} axis includes the origin. } \item{legend}{ Logical flag or \code{NULL}. If \code{legend=TRUE}, the algorithm plots a legend in the top left corner of the plot, explaining the meaning of the different line types and colours. } \item{legendpos}{ The position of the legend. Either a character string keyword (see \code{\link[graphics]{legend}} for keyword options) or a pair of coordinates in the format \code{list(x,y)}. Alternatively if \code{legendpos="float"}, a location will be selected inside the plot region, avoiding the graphics. } \item{legendavoid}{ Whether to avoid collisions between the legend and the graphics. Logical value. If \code{TRUE}, the code will check for collisions between the legend box and the graphics, and will override \code{legendpos} if a collision occurs. If \code{FALSE}, the value of \code{legendpos} is always respected. } \item{legendmath}{ Logical. If \code{TRUE}, the legend will display the mathematical notation for each curve. If \code{FALSE}, the legend text is the identifier (column name) for each curve. } \item{legendargs}{ Named list containing additional arguments to be passed to \code{\link{legend}} controlling the appearance of the legend. } \item{shade}{ A character vector giving the names of two columns of \code{x}, or another type of index that identifies two columns. When the corresponding curves are plotted, the region between the curves will be shaded in light grey. The object \code{x} may or may not contain two columns which are designated as boundaries for shading; they are identified by \code{fvnames(x, ".s")}. The default is to shade between these two curves if they exist. To suppress this behaviour, set \code{shade=NULL}. } \item{shadecol}{ The colour to be used in the \code{shade} plot. A character string or an integer specifying a colour. } \item{add}{ Logical. Whether the plot should be added to an existing plot } \item{log}{ A character string which contains \code{"x"} if the x axis is to be logarithmic, \code{"y"} if the y axis is to be logarithmic and \code{"xy"} or \code{"yx"} if both axes are to be logarithmic. } \item{mathfont}{ Character string. The font to be used for mathematical expressions in the axis labels and the legend. } \item{limitsonly}{ Logical. If \code{FALSE}, plotting is performed normally. If \code{TRUE}, no plotting is performed at all; just the \eqn{x} and \eqn{y} limits of the plot are computed and returned. } } \value{ Invisible: either \code{NULL}, or a data frame giving the meaning of the different line types and colours. } \details{ This is the \code{plot} method for the class \code{"fv"}. An object of class \code{"fv"} is a convenient way of storing several different statistical estimates of a summary function; see \code{\link{fv.object}}. 
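For example (a minimal sketch; similar calls appear in the Examples below):
\preformatted{
  K <- Kest(cells)
  plot(K)                        # all recommended estimates, with a legend
  plot(K, cbind(iso, theo) ~ r)  # only the isotropic and theoretical curves
}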
The default behaviour, executed by \code{plot(x)}, displays these different estimates as curves with different colours and line styles, and plots a legend explaining them. The use of the argument \code{fmla} is like \code{plot.formula}, but offers some extra functionality. The left and right hand sides of \code{fmla} are evaluated, and the results are plotted against each other (the left side on the \eqn{y} axis against the right side on the \eqn{x} axis). The left and right hand sides of \code{fmla} may be the names of columns of the data frame \code{x}, or expressions involving these names. If a variable in \code{fmla} is not the name of a column of \code{x}, the algorithm will search for an object of this name in the environment where \code{plot.fv} was called, and then in the enclosing environment, and so on. Multiple curves may be specified by a single formula of the form \code{cbind(y1,y2,\dots,yn) ~ x}, where \code{x,y1,y2,\dots,yn} are expressions involving the variables in the data frame. Each of the variables \code{y1,y2,\dots,yn} in turn will be plotted against \code{x}. See the examples. Convenient abbreviations which can be used in the formula are \itemize{ \item the symbol \code{.} which represents all the columns in the data frame that will be plotted by default; \item the symbol \code{.x} which represents the function argument; \item the symbol \code{.y} which represents the recommended value of the function. } For further information, see \code{\link{fvnames}}. The value returned by this plot function indicates the meaning of the line types and colours in the plot. It can be used to make a suitable legend for the plot if you want to do this by hand. See the examples. The argument \code{shade} can be used to display critical bands or confidence intervals. If it is not \code{NULL}, then it should be a subset index for the columns of \code{x}, that identifies exactly 2 columns. When the corresponding curves are plotted, the region between the curves will be shaded in light grey. See the Examples. The default values of \code{lty}, \code{col} and \code{lwd} can be changed using \code{\link[spatstat.geom]{spatstat.options}("plot.fv")}. Use \code{type = "n"} to create the plot region and draw the axes without plotting any data. Use \code{limitsonly=TRUE} to suppress all plotting and just compute the \eqn{x} and \eqn{y} limits. This can be used to calculate common \eqn{x} and \eqn{y} scales for several plots. To change the kind of parenthesis enclosing the explanatory text about the unit of length, use \code{\link[spatstat.geom]{spatstat.options}('units.paren')} } \section{Controlling the horizontal axis limits}{ The plot generated by \code{plot(x)} does not necessarily display all the data that is contained in the object. The range of values of the function argument \eqn{r} displayed in the plot may be narrower than the range of values actually contained in the data frame. To override this behaviour and display all the available data, set \code{clip.xlim=FALSE}. Statistical literature for summary functions of spatial data recommends that, when the function is plotted, the values of the function argument on the horizontal axis should be restricted to a limited range of values. For example, Ripley recommends that the K-function \eqn{K(r)} should be plotted only for values of distance \eqn{r} between \eqn{0} and \eqn{b/4} where \eqn{b} is the shortest side of the enclosing rectangle of the data. This may be desirable so that the interesting detail is clearly visible in the plot. 
It may be necessary because values outside the recommended range are theoretically invalid, or unreliable due to high variance or large bias. To support this standard practice, each object of class \code{"fv"} may include data specifying a \dQuote{recommended range} of values of the function argument. The object produced by \code{\link{Kest}} includes a recommended range following Ripley's recommendation above. Similarly for \code{\link{Gest}}, \code{\link{Fest}} and many other commands. When \code{plot(x)} is executed, the horizontal axis is restricted to the recommended range of values. This recommendation can be overridden by setting \code{clip.xlim=FALSE} or by specifying the numerical limits \code{xlim}. } \examples{ K <- Kest(cells) # K is an object of class "fv" plot(K, iso ~ r) # plots iso against r plot(K, sqrt(iso/pi) ~ r) # plots sqrt(iso/pi) against r plot(K, cbind(iso,theo) ~ r) # plots iso against r AND theo against r plot(K, . ~ r) # plots all available estimates of K against r plot(K, sqrt(./pi) ~ r) # plots all estimates of L-function # L(r) = sqrt(K(r)/pi) plot(K, cbind(iso,theo) ~ r, col=c(2,3)) # plots iso against r in colour 2 # and theo against r in colour 3 plot(K, iso ~ r, subset=quote(r < 0.2)) # plots iso against r for r < 0.2 # Can't remember the names of the columns? No problem.. plot(K, sqrt(./pi) ~ .x) # making a legend by hand v <- plot(K, . ~ r, legend=FALSE) legend("topleft", legend=v$meaning, lty=v$lty, col=v$col) # significance bands KE <- envelope(cells, Kest, nsim=19) plot(KE, shade=c("hi", "lo")) # how to display two functions on a common scale Kr <- Kest(redwood) a <- plot(K, limitsonly=TRUE) b <- plot(Kr, limitsonly=TRUE) xlim <- range(a$xlim, b$xlim) ylim <- range(a$ylim, b$ylim) opa <- par(mfrow=c(1,2)) plot(K, xlim=xlim, ylim=ylim) plot(Kr, xlim=xlim, ylim=ylim) par(opa) # For a shortcut, try plot(anylist(K, Kr), equal.scales=TRUE) } \seealso{ \code{\link{fv.object}}, \code{\link[spatstat.explore]{Kest}} } \author{ \adrian and \rolf } \keyword{spatial} \keyword{hplot} spatstat.explore/man/nnclean.Rd0000644000176200001440000001043514643125461016263 0ustar liggesusers\name{nnclean} \alias{nnclean} \alias{nnclean.ppp} \alias{nnclean.pp3} \title{ Nearest Neighbour Clutter Removal } \description{ Detect features in a 2D or 3D spatial point pattern using nearest neighbour clutter removal. } \usage{ nnclean(X, k, ...) \method{nnclean}{ppp}(X, k, ..., edge.correct = FALSE, wrap = 0.1, convergence = 0.001, plothist = FALSE, verbose = TRUE, maxit = 50) \method{nnclean}{pp3}(X, k, ..., convergence = 0.001, plothist = FALSE, verbose = TRUE, maxit = 50) } \arguments{ \item{X}{ A two-dimensional spatial point pattern (object of class \code{"ppp"}) or a three-dimensional point pattern (object of class \code{"pp3"}). } \item{k}{ Degree of neighbour: \code{k=1} means nearest neighbour, \code{k=2} means second nearest, etc. } \item{\dots}{ Arguments passed to \code{\link{hist.default}} to control the appearance of the histogram, if \code{plothist=TRUE}. } \item{edge.correct}{ Logical flag specifying whether periodic edge correction should be performed (only implemented in 2 dimensions). } \item{wrap}{ Numeric value specifying the relative size of the margin in which data will be replicated for the periodic edge correction (if \code{edge.correct=TRUE}). A fraction of window width and window height. } \item{convergence}{ Relative tolerance threshold for testing convergence of EM algorithm. } \item{maxit}{ Maximum number of iterations for EM algorithm.
} \item{plothist}{ Logical flag specifying whether to plot a diagnostic histogram of the nearest neighbour distances and the fitted distribution. } \item{verbose}{ Logical flag specifying whether to print progress reports. } } \details{ Byers and Raftery (1998) developed a technique for recognising features in a spatial point pattern in the presence of random clutter. For each point in the pattern, the distance to the \eqn{k}th nearest neighbour is computed. Then the E-M algorithm is used to fit a mixture distribution to the \eqn{k}th nearest neighbour distances. The mixture components represent the feature and the clutter. The mixture model can be used to classify each point as belong to one or other component. The function \code{nnclean} is generic, with methods for two-dimensional point patterns (class \code{"ppp"}) and three-dimensional point patterns (class \code{"pp3"}) currently implemented. The result is a point pattern (2D or 3D) with two additional columns of marks: \describe{ \item{class}{ A factor, with levels \code{"noise"} and \code{"feature"}, indicating the maximum likelihood classification of each point. } \item{prob}{ Numeric vector giving the estimated probabilities that each point belongs to a feature. } } The object also has extra information stored in attributes: \code{"theta"} contains the fitted parameters of the mixture model, \code{"info"} contains information about the fitting procedure, and \code{"hist"} contains the histogram structure returned from \code{\link{hist.default}} if \code{plothist = TRUE}. } \value{ An object of the same kind as \code{X}, obtained by attaching marks to the points of \code{X}. The object also has attributes, as described under Details. } \references{ Byers, S. and Raftery, A.E. (1998) Nearest-neighbour clutter removal for estimating features in spatial point processes. \emph{Journal of the American Statistical Association} \bold{93}, 577--584. } \author{ Original by Simon Byers and Adrian Raftery. Adapted for \pkg{spatstat} by \adrian. } \seealso{ \code{\link[spatstat.geom]{nndist}}, \code{\link[spatstat.geom]{split.ppp}}, \code{\link[spatstat.geom]{cut.ppp}} } \examples{ # shapley galaxy cluster X <- nnclean(shapley, k=17, plothist=TRUE) plot(X, which.marks=1, chars=c(".", "+"), cols=1:2, main="Shapley data, cluster and noise") plot(X, which.marks=2, cols=function(x)hsv(0.2+0.8*(1-x),1,1), main="Shapley data, probability of cluster") Y <- split(X, un=TRUE) plot(Y, chars="+", cex=0.5) marks(X) <- marks(X)$prob plot(cut(X, breaks=3), chars=c(".", "+", "+"), cols=1:3) } \keyword{spatial} \keyword{classif} spatstat.explore/man/pool.rat.Rd0000644000176200001440000000655614611073324016407 0ustar liggesusers\name{pool.rat} \alias{pool.rat} \title{ Pool Data from Several Ratio Objects } \description{ Pool the data from several ratio objects (objects of class \code{"rat"}) and compute a pooled estimate. } \usage{ \method{pool}{rat}(..., weights=NULL, relabel=TRUE, variance=TRUE) } \arguments{ \item{\dots}{ Objects of class \code{"rat"}. } \item{weights}{ Numeric vector of weights. } \item{relabel}{ Logical value indicating whether the result should be relabelled to show that it was obtained by pooling. } \item{variance}{ Logical value indicating whether to compute the sample variance and related terms. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"rat"} of ratio objects. It is used to combine several estimates of the same quantity when each estimate is a ratio. 
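As a toy numerical sketch of the pooling rule described below (with made-up numerators and denominators, not spatial data):
\preformatted{
  Y <- c(10, 12, 7)    # hypothetical numerators
  X <- c(100, 90, 80)  # hypothetical denominators
  sum(Y) / sum(X)      # pooled ratio-of-sums estimate
  mean(Y / X)          # not the same as averaging the individual ratios
}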
Each of the arguments \code{\dots} must be an object of class \code{"rat"} representing a ratio object (basically a numerator and a denominator; see \code{\link{rat}}). We assume that these ratios are all estimates of the same quantity. If the objects are called \eqn{R_1, \ldots, R_n}{R[1], \dots, R[n]} and if \eqn{R_i}{R[i]} has numerator \eqn{Y_i}{Y[i]} and denominator \eqn{X_i}{X[i]}, so that notionally \eqn{R_i = Y_i/X_i}{R[i] = Y[i]/X[i]}, then the pooled estimate is the ratio-of-sums estimator \deqn{ R = \frac{\sum_i Y_i}{\sum_i X_i}. }{ R = (Y[1]+\dots+Y[n])/(X[1]+\dots+X[n]). } The standard error of \eqn{R} is computed using the delta method as described in Baddeley \emph{et al.} (1993) or Cochran (1977, pp 154, 161). If the argument \code{weights} is given, it should be a numeric vector of length equal to the number of objects to be pooled. The pooled estimator is the ratio-of-sums estimator \deqn{ R = \frac{\sum_i w_i Y_i}{\sum_i w_i X_i} }{ R = (w[1] * Y[1]+\dots+ w[n] * Y[n])/(w[1] * X[1]+\dots+w[n] * X[n]) } where \eqn{w_i}{w[i]} is the \code{i}th weight. This calculation is implemented only for certain classes of objects where the arithmetic can be performed. This calculation is currently implemented only for objects which also belong to the class \code{"fv"} (function value tables). For example, if \code{\link{Kest}} is called with argument \code{ratio=TRUE}, the result is a suitable object (belonging to the classes \code{"rat"} and \code{"fv"}). Warnings or errors will be issued if the ratio objects \code{\dots} appear to be incompatible. However, the code is not smart enough to decide whether it is sensible to pool the data. } \value{ An object of the same class as the input. } \seealso{ \code{\link{rat}}, \code{\link{pool}}, \code{\link{pool.fv}}, \code{\link{Kest}} } \examples{ K1 <- Kest(runifpoint(42), ratio=TRUE, correction="iso") K2 <- Kest(runifpoint(42), ratio=TRUE, correction="iso") K3 <- Kest(runifpoint(42), ratio=TRUE, correction="iso") K <- pool(K1, K2, K3) plot(K, pooliso ~ r, shade=c("hiiso", "loiso")) } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Cochran, W.G. (1977) \emph{Sampling techniques}, 3rd edition. New York: John Wiley and Sons. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/localKcross.Rd0000644000176200001440000001241714611073324017121 0ustar liggesusers\name{localKcross} \alias{localKcross} \alias{localLcross} \title{Local Multitype K Function (Cross-Type)} \description{ for a multitype point pattern, computes the cross-type version of the local K function. } \usage{ localKcross(X, from, to, \dots, rmax = NULL, correction = "Ripley", verbose = TRUE, rvalue=NULL) localLcross(X, from, to, \dots, rmax = NULL, correction = "Ripley") } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} with marks which are a factor). } \item{\dots}{ Further arguments passed from \code{localLcross} to \code{localKcross}. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{from}{ Type of points from which distances should be measured. A single value; one of the possible levels of \code{marks(X)}, or an integer indicating which level. } \item{to}{ Type of points to which distances should be measured. A single value; one of the possible levels of \code{marks(X)}, or an integer indicating which level. 
} \item{correction}{ String specifying the edge correction to be applied. Options are \code{"none"}, \code{"translate"}, \code{"translation"}, \code{"Ripley"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{verbose}{ Logical flag indicating whether to print progress reports during the calculation. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the function L or K should be computed. } } \details{ Given a multitype spatial point pattern \code{X}, the local cross-type \eqn{K} function \code{localKcross} is the local version of the multitype \eqn{K} function \code{\link{Kcross}}. Recall that \code{Kcross(X, from, to)} is a sum of contributions from all pairs of points in \code{X} where the first point belongs to \code{from} and the second point belongs to type \code{to}. The \emph{local} cross-type \eqn{K} function is defined for each point \code{X[i]} that belongs to type \code{from}, and it consists of all the contributions to the cross-type \eqn{K} function that originate from point \code{X[i]}: \deqn{ K_{i,from,to}(r) = \sqrt{\frac a {(n-1) \pi} \sum_j e_{ij}} }{ K[i,from,to](r) = sqrt( (a/((n-1)* pi)) * sum[j] e[i,j]) } where the sum is over all points \eqn{j \neq i}{j != i} belonging to type \code{to}, that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{a} is the area of the observation window, \eqn{n} is the number of points in \code{X}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The value of \eqn{K_{i,from,to}(r)}{K[i,from,to](r)} can also be interpreted as one of the summands that contributes to the global estimate of the \code{\link{Kcross}} function. By default, the function \eqn{K_{i,from,to}(r)}{K[i,from,to](r)} is computed for a range of \eqn{r} values for each point \eqn{i} belonging to type \code{from}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X} belonging to type \code{from}. Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X} belonging to type \code{from}. The local cross-type \eqn{L} function \code{localLcross} is computed by applying the transformation \eqn{L(r) = \sqrt{K(r)/(2\pi)}}{L(r) = sqrt(K(r)/(2*pi))}. } \value{ If \code{rvalue} is given, the result is a numeric vector of length equal to the number of points in the point pattern that belong to type \code{from}. If \code{rvalue} is absent, the result is an object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point of type \code{from}. The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{Kcross}}, \code{\link{Lcross}}, \code{\link{localK}}, \code{\link{localL}}. 
Inhomogeneous counterparts of \code{localK} and \code{localL} are computed by \code{\link{localKcross.inhom}} and \code{\link{localLinhom}}. } \examples{ X <- amacrine # compute all the local Lcross functions L <- localLcross(X) # plot all the local Lcross functions against r plot(L, main="local Lcross functions for amacrine", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) # compute the values of L(r) for r = 0.1 metres L12 <- localLcross(X, rvalue=0.1) } \author{ \ege and \adrian. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Kmulti.Rd0000644000176200001440000001725114643125461016115 0ustar liggesusers\name{Kmulti} \alias{Kmulti} \title{ Marked K-Function } \description{ For a marked point pattern, estimate the multitype \eqn{K} function which counts the expected number of points of subset \eqn{J} within a given distance from a typical point in subset \code{I}. } \usage{ Kmulti(X, I, J, r=NULL, breaks=NULL, correction, \dots, rmax=NULL, ratio=FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset index specifying the points of \code{X} from which distances are measured. See Details. } \item{J}{Subset index specifying the points in \code{X} to which distances are measured. See Details. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. If necessary, specify \code{rmax}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"periodic"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{Ignored.} \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{IJ}(r)}{KIJ(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{IJ}(r)}{KIJ(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{IJ}(r)}{KIJ(r)} obtained by the edge corrections named. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ The function \code{Kmulti} generalises \code{\link{Kest}} (for unmarked point patterns) and \code{\link{Kdot}} and \code{\link{Kcross}} (for multitype point patterns) to arbitrary marked point patterns. 
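For example (a minimal sketch using the \code{amacrine} data), the cross-type estimate \code{Kcross(amacrine, "on", "off")} is essentially equivalent to a call to \code{Kmulti} with logical subset indices:
\preformatted{
  I <- (marks(amacrine) == "on")
  J <- (marks(amacrine) == "off")
  K <- Kmulti(amacrine, I, J)
}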
Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly overlapping, of a marked point process. The multitype \eqn{K} function is defined so that \eqn{\lambda_J K_{IJ}(r)}{lambda[J] KIJ(r)} equals the expected number of additional random points of \eqn{X_J}{X[J]} within a distance \eqn{r} of a typical point of \eqn{X_I}{X[I]}. Here \eqn{\lambda_J}{lambda[J]} is the intensity of \eqn{X_J}{X[J]} i.e. the expected number of points of \eqn{X_J}{X[J]} per unit area. The function \eqn{K_{IJ}}{KIJ} is determined by the second order moment properties of \eqn{X}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{border}{the border method or ``reduced sample'' estimator (see Ripley, 1988). This is the least efficient (statistically) and the fastest to compute. It can be computed for a window of arbitrary shape. } \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is currently implemented only for rectangular and polygonal windows. } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries. } } The pair correlation function \code{\link{pcf}} can also be applied to the result of \code{Kmulti}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. 
Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The function \eqn{K_{IJ}}{KIJ} is not necessarily differentiable. The border correction (reduced sample) estimator of \eqn{K_{IJ}}{KIJ} used here is pointwise approximately unbiased, but need not be a nondecreasing function of \eqn{r}, while the true \eqn{K_{IJ}}{KIJ} must be nondecreasing. } \seealso{ \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kest}}, \code{\link{pcf}} } \examples{ # Longleaf Pine data: marks represent diameter trees <- longleaf \testonly{ trees <- trees[seq(1,npoints(trees), by=50), ] } K <- Kmulti(trees, marks(trees) <= 15, marks(trees) >= 25) plot(K) # functions determining subsets f1 <- function(X) { marks(X) <= 15 } f2 <- function(X) { marks(X) >= 15 } K <- Kmulti(trees, f1, f2) \testonly{ rm(trees) } } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Finhom.Rd0000644000176200001440000001563614643125461016075 0ustar liggesusers\name{Finhom} \alias{Finhom} \title{ Inhomogeneous Empty Space Function } \description{ Estimates the inhomogeneous empty space function of a non-stationary point pattern. } \usage{ Finhom(X, lambda = NULL, lmin = NULL, ..., sigma = NULL, varcov = NULL, r = NULL, breaks = NULL, ratio = FALSE, update = TRUE, warn.bias=TRUE, savelambda=FALSE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{F} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link[spatstat.geom]{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lmin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{\dots}{ Extra arguments passed to \code{\link[spatstat.geom]{as.mask}} to control the pixel resolution, or passed to \code{\link{density.ppp}} to control the smoothing bandwidth. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{K} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns. } \item{update}{ Logical. If \code{lambda} is a fitted model (class \code{"ppm"} or \code{"kppm"}) and \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link[spatstat.model]{update.ppm}} or \code{\link[spatstat.model]{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without fitting it to \code{X}. 
} \item{warn.bias}{ Logical value specifying whether to issue a warning when the inhomogeneity correction factor takes extreme values, which can often lead to biased results. This usually occurs when insufficient smoothing is used to estimate the intensity. } \item{savelambda}{ Logical value specifying whether to save the values of \code{lmin} and \code{lambda} as attributes of the result. } } \details{ This command computes estimates of the inhomogeneous \eqn{F}-function (van Lieshout, 2010) of a point pattern. It is the counterpart, for inhomogeneous spatial point patterns, of the empty space function \eqn{F} for homogeneous point patterns computed by \code{\link{Fest}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). The inhomogeneous \eqn{F} function is computed using the border correction, equation (6) in Van Lieshout (2010). The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda} of the point process. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a fitted point process model}{ (object of class \code{"ppm"} or \code{"kppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. 
(1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. (2010) A J-function for inhomogeneous point processes. \emph{Statistica Neerlandica} \bold{65}, 183--201. } \seealso{ \code{\link{Ginhom}}, \code{\link{Jinhom}}, \code{\link{Fest}} } \examples{ online <- interactive() if(online) { plot(Finhom(swedishpines, sigma=10)) plot(Finhom(swedishpines, sigma=bw.diggle, adjust=2)) } else { ## use a coarse grid for faster computation and package testing plot(Finhom(swedishpines, sigma=10, dimyx=32)) } } \author{ Original code by Marie-Colette van Lieshout. C implementation and R adaptation by \adrian and \ege. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/ssf.Rd0000644000176200001440000000317114611073325015433 0ustar liggesusers\name{ssf} \alias{ssf} \title{ Spatially Sampled Function } \description{ Create an object that represents a spatial function which has been evaluated or sampled at an irregular set of points. } \usage{ ssf(loc, val) } \arguments{ \item{loc}{ The spatial locations at which the function has been evaluated. A point pattern (object of class \code{"ppp"}). } \item{val}{ The function values at these locations. A numeric vector with one entry for each point of \code{loc}, or a data frame with one row for each point of \code{loc}. } } \details{ An object of class \code{"ssf"} represents a real-valued or vector-valued function that has been evaluated or sampled at an irregular set of points. An example would be a spatial covariate that has only been measured at certain locations. An object of this class also inherits the class \code{"ppp"}, and is essentially the same as a marked point pattern, except for the class membership which enables it to be handled in a different way. There are methods for \code{plot}, \code{print} etc; see \code{\link{plot.ssf}} and \code{\link[spatstat.explore:methods.ssf]{methods.ssf}}. Use \code{\link[spatstat.geom]{unmark}} to extract only the point locations, and \code{\link{marks.ssf}} to extract only the function values. } \value{ Object of class \code{"ssf"}. } \author{ \adrian } \seealso{ \code{\link{plot.ssf}}, \code{\link[spatstat.explore:methods.ssf]{methods.ssf}}, \code{\link{Smooth.ssf}}, \code{\link{with.ssf}}, \code{\link{[.ssf}}. } \examples{ ssf(cells, nndist(cells, k=1:3)) } \keyword{spatial} \keyword{datagen} spatstat.explore/man/Gest.Rd0000644000176200001440000002160214643125461015545 0ustar liggesusers\name{Gest} \alias{Gest} \alias{nearest.neighbour} \title{ Nearest Neighbour Distance Function G } \description{ Estimates the nearest neighbour distance distribution function \eqn{G(r)} from a point pattern in a window of arbitrary shape. } \usage{ Gest(X, r=NULL, breaks=NULL, \dots, correction=c("rs", "km", "han"), domain=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{G(r)} will be computed. An object of class \code{ppp}, or data in any format acceptable to \code{\link[spatstat.geom]{as.ppp}()}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which \eqn{G(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{G(r)}. 
A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{domain}{ Optional. Calculations will be restricted to this subset of the window. See Details. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing some or all of the following columns: \item{r}{the values of the argument \eqn{r} at which the function \eqn{G(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G(r)}, i.e. the empirical distribution of the distances from each point in the pattern \code{X} to the nearest other point of the pattern } \item{han}{the Hanisch correction estimator of \eqn{G(r)} } \item{theo}{the theoretical value of \eqn{G(r)} for a stationary Poisson process of the same estimated intensity. } } \details{ The nearest neighbour distance distribution function (also called the ``\emph{event-to-event}'' or ``\emph{inter-event}'' distribution) of a point process \eqn{X} is the cumulative distribution function \eqn{G} of the distance from a typical random point of \eqn{X} to the nearest other point of \eqn{X}. An estimate of \eqn{G} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1988). In exploratory analyses, the estimate of \eqn{G} is a useful statistic summarising one aspect of the ``clustering'' of points. For inferential purposes, the estimate of \eqn{G} is usually compared to the true value of \eqn{G} for a completely random (Poisson) point process, which is \deqn{G(r) = 1 - e^{ - \lambda \pi r^2} }{% G(r) = 1 - exp( - lambda * pi * r^2)} where \eqn{\lambda}{lambda} is the intensity (expected number of points per unit area). Deviations between the empirical and theoretical \eqn{G} curves may suggest spatial clustering or spatial regularity. This algorithm estimates the nearest neighbour distance distribution function \eqn{G} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link[spatstat.geom]{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link[spatstat.geom]{as.ppp}()}. The estimation of \eqn{G} is hampered by edge effects arising from the unobservability of points of the random pattern outside the window. An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). The edge corrections implemented here are the border method or ``\emph{reduced sample}'' estimator, the spatial Kaplan-Meier estimator (Baddeley and Gill, 1997) and the Hanisch estimator (Hanisch, 1984). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. 
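For those who do wish to supply \code{r}, a sketch of a typical specification is given here (the grid spacing and endpoint are arbitrary choices for the unit square window of the \code{cells} data; the formal conditions on \code{r} are spelled out below):
  rr <- seq(0, 0.6, by=0.002)   # fine, equally-spaced grid starting at zero
  G <- Gest(cells, r=rr)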
The estimators are computed from histogram counts. This introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G(r)}. The hazard rate is defined as the derivative \deqn{\lambda(r) = - \frac{d}{dr} \log (1 - G(r))}{% lambda(r) = - (d/dr) log(1 - G(r))} This estimate should be used with caution as \eqn{G} is not necessarily differentiable. If the argument \code{domain} is given, the estimate of \eqn{G(r)} will be based only on the nearest neighbour distances measured from points falling inside \code{domain} (although their nearest neighbours may lie outside \code{domain}). This is useful in bootstrap techniques. The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link[spatstat.geom]{as.owin}}. It must be a subset of the window of the point pattern \code{X}. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G}. However it is sometimes useful. It can be returned by the algorithm, by selecting \code{correction="none"}. Care should be taken not to use the uncorrected empirical \eqn{G} as if it were an unbiased estimator of \eqn{G}. To simply compute the nearest neighbour distance for each point in the pattern, use \code{\link[spatstat.geom]{nndist}}. To determine which point is the nearest neighbour of a given point, use \code{\link[spatstat.geom]{nnwhich}}. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37-78. Baddeley, A.J. and Gill, R.D. Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25} (1997) 263-292. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Hanisch, K.-H. (1984) Some remarks on estimators of the distribution function of nearest-neighbour distance in stationary spatial point patterns. \emph{Mathematische Operationsforschung und Statistik, series Statistics} \bold{15}, 409--412. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The function \eqn{G} does not necessarily have a density. Any valid c.d.f. may appear as the nearest neighbour distance distribution function of a stationary point process. The reduced sample estimator of \eqn{G} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G} is always nondecreasing but its maximum value may be less than \eqn{1}. 
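These properties are easy to verify numerically; a small sketch, assuming the \code{cells} dataset:
  G <- Gest(cells)
  any(diff(G$rs) < 0)    # the border (reduced sample) estimate may decrease in places
  all(diff(G$km) >= 0)   # the Kaplan-Meier estimate is nondecreasing
  max(G$km)              # but its maximum may fall short of 1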
} \seealso{ \code{\link[spatstat.geom]{nndist}}, \code{\link[spatstat.geom]{nnwhich}}, \code{\link{Fest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link[spatstat.univar]{km.rs}}, \code{\link[spatstat.univar]{reduced.sample}}, \code{\link[spatstat.univar]{kaplan.meier}} } \examples{ G <- Gest(cells) plot(G) # P-P style plot plot(G, cbind(km,theo) ~ theo) # the empirical G is below the Poisson G, # indicating an inhibited pattern if(interactive()) { plot(G, . ~ r) plot(G, . ~ theo) plot(G, asin(sqrt(.)) ~ asin(sqrt(theo))) } } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/pairorient.Rd0000644000176200001440000000777014611073324017024 0ustar liggesusers\name{pairorient} \alias{pairorient} \title{ Point Pair Orientation Distribution } \description{ Computes the distribution of the orientation of vectors joining pairs of points at a particular range of distances. } \usage{ pairorient(X, r1, r2, \dots, cumulative=FALSE, correction, ratio = FALSE, unit=c("degree", "radian"), domain=NULL) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{r1,r2}{ Minimum and maximum values of distance to be considered. } \item{\dots}{ Arguments passed to \code{\link{circdensity}} to control the kernel smoothing, if \code{cumulative=FALSE}. } \item{cumulative}{ Logical value specifying whether to estimate the probability density (\code{cumulative=FALSE}, the default) or the cumulative distribution function (\code{cumulative=TRUE}). } \item{correction}{ Character vector specifying edge correction or corrections. Options are \code{"none"}, \code{"isotropic"}, \code{"translate"}, \code{"border"}, \code{"bord.modif"}, \code{"good"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. The default is to compute all edge corrections except \code{"none"}. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{unit}{ Unit in which the angles should be expressed. Either \code{"degree"} or \code{"radian"}. } \item{domain}{ Optional window. The first point \eqn{x_i}{x[i]} of each pair of points will be constrained to lie in \code{domain}. } } \details{ This algorithm considers all pairs of points in the pattern \code{X} that lie more than \code{r1} and less than \code{r2} units apart. The \emph{direction} of the arrow joining the points is measured, as an angle in degrees or radians, anticlockwise from the \eqn{x} axis. If \code{cumulative=FALSE} (the default), a kernel estimate of the probability density of the orientations is calculated using \code{\link{circdensity}}. If \code{cumulative=TRUE}, then the cumulative distribution function of these directions is calculated. This is the function \eqn{O_{r1,r2}(\phi)}{O[r1,r2](phi)} defined in Stoyan and Stoyan (1994), equation (14.53), page 271. In either case the result can be plotted as a rose diagram by \code{\link{rose}}, or as a function plot by \code{\link{plot.fv}}. The algorithm gives each observed direction a weight, determined by an edge correction, to adjust for the fact that some interpoint distances are more likely to be observed than others. The choice of edge correction or corrections is determined by the argument \code{correction}. See the help for \code{\link{Kest}} for details of edge corrections, and explanation of the options available. 
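For intuition, the uncorrected calculation reduces to the following sketch (illustration only; the distance band 0.05 to 0.15 and the \code{redwood} data are arbitrary choices):
  d <- pairdist(redwood)
  ok <- (d > 0.05 & d < 0.15)
  ij <- which(ok, arr.ind=TRUE)
  dx <- redwood$x[ij[,1]] - redwood$x[ij[,2]]
  dy <- redwood$y[ij[,1]] - redwood$y[ij[,2]]
  ang <- atan2(dy, dx) * 180/pi   # degrees, anticlockwise from the x axis
  hist(ang)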
The choice \code{correction="none"} is not recommended; it is included for demonstration purposes only. The default is to compute all corrections except \code{"none"}. It is also possible to calculate an estimate of the probability density from the cumulative distribution function, by numerical differentiation. Use \code{\link{deriv.fv}} with the argument \code{Dperiodic=TRUE}. } \value{ A function value table (object of class \code{"fv"}) containing the estimates of the probability density or the cumulative distribution function of angles, in degrees (if \code{unit="degree"}) or radians (if \code{unit="radian"}). } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, Random Shapes and Point Fields: Methods of Geometrical Statistics. John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{Ksector}}, \code{\link{nnorient}} } \examples{ rose(pairorient(redwood, 0.05, 0.15, sigma=8), col="grey") plot(CDF <- pairorient(redwood, 0.05, 0.15, cumulative=TRUE)) plot(f <- deriv(CDF, spar=0.6, Dperiodic=TRUE)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/pairMean.Rd0000644000176200001440000000405414643125461016401 0ustar liggesusers\name{pairMean} \alias{pairMean} \title{ Mean of a Function of Interpoint Distance } \description{ Computes the mean value, or the double integral, of a specified function of the distance between two independent random points in a given window or windows. } \usage{ pairMean(fun, W, V = NULL, ..., normalise = TRUE) } \arguments{ \item{fun}{ A function in the \R language which takes one argument. } \item{W}{ A window (object of class \code{"owin"}) containing the first random point. } \item{V}{ Optional. Another window containing the second random point. Defaults to \code{W}. } \item{\dots}{ Further optional arguments passed to \code{\link{distcdf}} to determine the pixel resolution for the calculation and the probability distributions of the random points. } \item{normalise}{ Logical value specifying whether to calculate the mean value (\code{normalise=TRUE}, the default) or the double integral (\code{normalise=FALSE}). } } \details{ This command computes the mean value of \code{fun(T)} where \code{T} is the Euclidean distance \eqn{T = \|X_1 - X_2\|}{T = |X1-X2|} between two independent random points \eqn{X_1}{X1} and \eqn{X_2}{X2}. In the simplest case, the command \code{pairMean(fun, W)}, the random points are assumed to be uniformly distributed in the same window \code{W}. Alternatively the two random points may be uniformly distributed in two different windows \code{W} and \code{V}. Other options are described in \code{\link{distcdf}}. The algorithm uses \code{\link{distcdf}} to compute the cumulative distribution function of \code{T}, and \code{\link[spatstat.univar]{stieltjes}} to compute the mean value of \code{fun(T)}. If \code{normalise=TRUE} (the default) the result is the mean value of \code{fun(T)}. If \code{normalise=FALSE} the result is the double integral. } \value{ A single numeric value. } \author{ \adrian. } \seealso{ \code{\link{distcdf}} } \examples{ pairMean(function(d) { d^2 }, disc()) } \keyword{spatial} \keyword{math} spatstat.explore/man/scan.test.Rd0000644000176200001440000001271214643125462016550 0ustar liggesusers\name{scan.test} \alias{scan.test} \title{ Spatial Scan Test } \description{ Performs the Spatial Scan Test for clustering in a spatial point pattern, or for clustering of one type of point in a bivariate spatial point pattern. 
} \usage{ scan.test(X, r, ..., method = c("poisson", "binomial"), nsim = 19, baseline = NULL, case = 2, alternative = c("greater", "less", "two.sided"), verbose = TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{r}{ Radius of circle to use. A single number or a numeric vector. } \item{\dots}{ Optional. Arguments passed to \code{\link[spatstat.geom]{as.mask}} to determine the spatial resolution of the computations. } \item{method}{ Either \code{"poisson"} or \code{"binomial"} specifying the type of likelihood. } \item{nsim}{ Number of simulations for computing Monte Carlo p-value. } \item{baseline}{ Baseline for the Poisson intensity, if \code{method="poisson"}. A pixel image or a function. } \item{case}{ Which type of point should be interpreted as a case, if \code{method="binomial"}. Integer or character string. } \item{alternative}{ Alternative hypothesis: \code{"greater"} if the alternative postulates that the mean number of points inside the circle will be greater than expected under the null. } \item{verbose}{ Logical. Whether to print progress reports. } } \details{ The spatial scan test (Kulldorff, 1997) is applied to the point pattern \code{X}. In a nutshell, \itemize{ \item If \code{method="poisson"} then a significant result would mean that there is a circle of radius \code{r}, located somewhere in the spatial domain of the data, which contains a significantly higher than expected number of points of \code{X}. That is, the pattern \code{X} exhibits spatial clustering. \item If \code{method="binomial"} then \code{X} must be a bivariate (two-type) point pattern. By default, the first type of point is interpreted as a control (non-event) and the second type of point as a case (event). A significant result would mean that there is a circle of radius \code{r} which contains a significantly higher than expected number of cases. That is, the cases are clustered together, conditional on the locations of all points. } Following is a more detailed explanation. \itemize{ \item If \code{method="poisson"} then the scan test based on Poisson likelihood is performed (Kulldorff, 1997). The dataset \code{X} is treated as an unmarked point pattern. By default (if \code{baseline} is not specified) the null hypothesis is complete spatial randomness CSR (i.e. a uniform Poisson process). The alternative hypothesis is a Poisson process with one intensity \eqn{\beta_1}{beta1} inside some circle of radius \code{r} and another intensity \eqn{\beta_0}{beta0} outside the circle. If \code{baseline} is given, then it should be a pixel image or a \code{function(x,y)}. The null hypothesis is an inhomogeneous Poisson process with intensity proportional to \code{baseline}. The alternative hypothesis is an inhomogeneous Poisson process with intensity \code{beta1 * baseline} inside some circle of radius \code{r}, and \code{beta0 * baseline} outside the circle. \item If \code{method="binomial"} then the scan test based on binomial likelihood is performed (Kulldorff, 1997). The dataset \code{X} must be a bivariate point pattern, i.e. a multitype point pattern with two types. The null hypothesis is that all permutations of the type labels are equally likely. The alternative hypothesis is that some circle of radius \code{r} has a higher proportion of points of the second type, than expected under the null hypothesis. } The result of \code{scan.test} is a hypothesis test (object of class \code{"htest"}) which can be plotted to report the results.
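A minimal sketch of this workflow (the radius 0.1 and \code{nsim=19} are arbitrary choices):
  ST <- scan.test(redwood, 0.1, method="poisson", nsim=19)
  ST$p.value    # Monte Carlo p-value
  plot(ST)      # image of the likelihood ratio test statistic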
The component \code{p.value} contains the \eqn{p}-value. The result of \code{scan.test} can also be plotted (using the plot method for the class \code{"scan.test"}). The plot is a pixel image of the Likelihood Ratio Test Statistic (2 times the log likelihood ratio) as a function of the location of the centre of the circle. This pixel image can be extracted from the object using \code{\link{as.im.scan.test}}. The Likelihood Ratio Test Statistic is computed by \code{\link{scanLRTS}}. } \value{ An object of class \code{"htest"} (hypothesis test) which also belongs to the class \code{"scan.test"}. Printing this object gives the result of the test. Plotting this object displays the Likelihood Ratio Test Statistic as a function of the location of the centre of the circle. } \references{ Kulldorff, M. (1997) A spatial scan statistic. \emph{Communications in Statistics --- Theory and Methods} \bold{26}, 1481--1496. } \author{\adrian and \rolf } \seealso{ \code{\link{plot.scan.test}}, \code{\link{as.im.scan.test}}, \code{\link{relrisk}}, \code{\link{scanLRTS}} } \examples{ nsim <- if(interactive()) 19 else 2 rr <- if(interactive()) seq(0.5, 1, by=0.1) else c(0.5, 1) scan.test(redwood, 0.1 * rr, method="poisson", nsim=nsim) scan.test(chorley, rr, method="binomial", case="larynx", nsim=nsim) } \keyword{htest} \keyword{spatial} \concept{Test of randomness} \concept{Test of clustering} spatstat.explore/man/Hest.Rd0000644000176200001440000001344114643125461015550 0ustar liggesusers\name{Hest} \alias{Hest} \title{Spherical Contact Distribution Function} \description{ Estimates the spherical contact distribution function of a random set. } \usage{ Hest(X, r=NULL, breaks=NULL, ..., W, correction=c("km", "rs", "han"), conditional=TRUE) } \arguments{ \item{X}{The observed random set. An object of class \code{"ppp"}, \code{"psp"} or \code{"owin"}. Alternatively a pixel image (class \code{"im"}) with logical values. } \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{H(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Arguments passed to \code{\link[spatstat.geom]{as.mask}} to control the discretisation. } \item{W}{ Optional. A window (object of class \code{"owin"}) to be taken as the window of observation. The contact distribution function will be estimated from values of the contact distance inside \code{W}. The default is \code{W=Frame(X)} when \code{X} is a window, and \code{W=Window(X)} otherwise. } \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{H(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"han"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{conditional}{ Logical value indicating whether to compute the conditional or unconditional distribution. See Details. } } \details{ The spherical contact distribution function of a stationary random set \eqn{X} is the cumulative distribution function \eqn{H} of the distance from a fixed point in space to the nearest point of \eqn{X}, given that the point lies outside \eqn{X}. That is, \eqn{H(r)} equals the probability that \code{X} lies closer than \eqn{r} units away from the fixed point \eqn{x}, given that \code{X} does not cover \eqn{x}. Let \eqn{D = d(x,X)} be the shortest distance from an arbitrary point \eqn{x} to the set \code{X}. 
Then the spherical contact distribution function is \deqn{H(r) = P(D \le r \mid D > 0)}{H(r) = P(D <= r | D > 0)} For a point process, the spherical contact distribution function is the same as the empty space function \eqn{F} discussed in \code{\link{Fest}}. The argument \code{X} may be a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}) or a window (object of class \code{"owin"}). It is assumed to be a realisation of a stationary random set. The algorithm first calls \code{\link[spatstat.geom]{distmap}} to compute the distance transform of \code{X}, then computes the Kaplan-Meier and reduced-sample estimates of the cumulative distribution following Hansen et al (1999). If \code{conditional=TRUE} (the default) the algorithm returns an estimate of the spherical contact function \eqn{H(r)} as defined above. If \code{conditional=FALSE}, it instead returns an estimate of the cumulative distribution function \eqn{H^\ast(r) = P(D \le r)}{H*(r) = P(D <= r)} which includes a jump at \eqn{r=0} if \code{X} has nonzero area. Accuracy depends on the pixel resolution, which is controlled by the arguments \code{eps}, \code{dimyx} and \code{xy} passed to \code{\link[spatstat.geom]{as.mask}}. For example, use \code{eps=0.1} to specify square pixels of side 0.1 units, and \code{dimyx=256} to specify a 256 by 256 grid of pixels. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing up to six columns: \item{r}{the values of the argument \eqn{r} at which the function \eqn{H(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{H(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{H(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{H(r)} by the spatial Kaplan-Meier method } \item{han}{the spatial Hanisch-Chiu-Stoyan estimator of \eqn{H(r)} } \item{raw}{the uncorrected estimate of \eqn{H(r)}, i.e. the empirical distribution of the distance from a fixed point in the window to the nearest point of \code{X} } } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37-78. Baddeley, A.J. and Gill, R.D. The empty space hazard of a spatial pattern. Research Report 1994/3, Department of Mathematics, University of Western Australia, May 1994. Hansen, M.B., Baddeley, A.J. and Gill, R.D. First contact distributions for spatial patterns: regularity and estimation. \emph{Advances in Applied Probability} \bold{31} (1999) 15-33. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \seealso{\code{\link{Fest}}} \examples{ X <- runifpoint(42) H <- Hest(X) Y <- rpoisline(10) H <- Hest(Y) H <- Hest(Y, dimyx=256) X <- heather$coarse plot(Hest(X)) H <- Hest(X, conditional=FALSE) P <- owin(poly=list(x=c(5.3, 8.5, 8.3, 3.7, 1.3, 3.7), y=c(9.7, 10.0, 13.6, 14.4, 10.7, 7.2))) plot(X) plot(P, add=TRUE, col="red") H <- Hest(X, W=P) Z <- as.im(FALSE, Frame(X)) Z[X] <- TRUE Z <- Z[P, drop=FALSE] plot(Z) H <- Hest(Z) } \author{ \spatstatAuthors with contributions from Kassel Hingee. 
} \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/deriv.fv.Rd0000644000176200001440000000730614611073323016365 0ustar liggesusers\name{deriv.fv} \alias{deriv.fv} \title{ Calculate Derivative of Function Values } \description{ Applies numerical differentiation to the values in selected columns of a function value table. } \usage{ \method{deriv}{fv}(expr, which = "*", ..., method=c("spline", "numeric"), kinks=NULL, periodic=FALSE, Dperiodic=periodic) } \arguments{ \item{expr}{ Function values to be differentiated. A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). } \item{which}{ Character vector identifying which columns of the table should be differentiated. Either a vector containing names of columns, or one of the wildcard strings \code{"*"} or \code{"."} explained below. } \item{\dots}{ Extra arguments passed to \code{\link[stats]{smooth.spline}} to control the differentiation algorithm, if \code{method="spline"}. } \item{method}{ Differentiation method. A character string, partially matched to either \code{"spline"} or \code{"numeric"}. } \item{kinks}{ Optional vector of \eqn{x} values where the derivative is allowed to be discontinuous. } \item{periodic}{ Logical value indicating whether the function \code{expr} is periodic. } \item{Dperiodic}{ Logical value indicating whether the resulting derivative should be a periodic function. } } \details{ This command performs numerical differentiation on the function values in a function value table (object of class \code{"fv"}). The differentiation is performed either by \code{\link[stats]{smooth.spline}} or by a naive numerical difference algorithm. The command \code{\link{deriv}} is generic. This is the method for objects of class \code{"fv"}. Differentiation is applied to every column (or to each of the selected columns) of function values in turn, using the function argument as the \eqn{x} coordinate and the selected column as the \eqn{y} coordinate. The original function values are then replaced by the corresponding derivatives. The optional argument \code{which} specifies which of the columns of function values in \code{expr} will be differentiated. The default (indicated by the wildcard \code{which="*"}) is to differentiate all function values, i.e.\ all columns except the function argument. Alternatively \code{which="."} designates the subset of function values that are displayed in the default plot. Alternatively \code{which} can be a character vector containing the names of columns of \code{expr}. If the argument \code{kinks} is given, it should be a numeric vector giving the discontinuity points of the function: the value or values of the function argument at which the function is not differentiable. Differentiation will be performed separately on intervals between the discontinuity points. If \code{periodic=TRUE} then the function \code{expr} is taken to be periodic, with period equal to the range of the function argument in \code{expr}. The resulting derivative is periodic. If \code{periodic=FALSE} but \code{Dperiodic=TRUE}, then the \emph{derivative} is assumed to be periodic. This would be appropriate if \code{expr} is the cumulative distribution function of an angular variable, for example. } \value{ Another function value table (object of class \code{"fv"}) of the same format. 
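For example, the spline-based and naive numerical methods described above can be compared directly; a small sketch using an estimate from \code{Gest}:
  G <- Gest(cells)
  Ds <- deriv(G, which="km", method="spline", spar=0.5)
  Dn <- deriv(G, which="km", method="numeric")
  plot(Ds)
  plot(Dn)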
} \author{\adrian and \rolf } \seealso{ \code{\link{with.fv}}, \code{\link{fv.object}}, \code{\link[stats]{smooth.spline}} } \examples{ G <- Gest(cells) plot(deriv(G, which=".", spar=0.5)) A <- pairorient(redwood, 0.05, 0.15) DA <- deriv(A, spar=0.6, Dperiodic=TRUE) } \keyword{spatial} \keyword{math} \keyword{nonparametric} spatstat.explore/man/pcfmulti.Rd0000644000176200001440000001177514611073324016473 0ustar liggesusers\name{pcfmulti} \alias{pcfmulti} \title{ Marked pair correlation function } \description{ For a marked point pattern, estimate the multitype pair correlation function using kernel methods. } \usage{ pcfmulti(X, I, J, ..., r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("translate", "Ripley"), divisor = c("r", "d"), Iname = "points satisfying condition I", Jname = "points satisfying condition J", ratio = FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{I}{Subset index specifying the points of \code{X} from which distances are measured. } \item{J}{Subset index specifying the points in \code{X} to which distances are measured. } \item{\dots}{ Ignored. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{stoyan}{ Coefficient for default bandwidth rule. } \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. } \item{Iname,Jname}{ Optional. Character strings describing the members of the subsets \code{I} and \code{J}. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \details{ This is a generalisation of \code{\link{pcfcross}} to arbitrary collections of points. The algorithm measures the distance from each data point in subset \code{I} to each data point in subset \code{J}, excluding identical pairs of points. The distances are kernel-smoothed and renormalised to form a pair correlation function. \itemize{ \item If \code{divisor="r"} (the default), then the multitype counterpart of the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. \item If \code{divisor="d"} then a modified estimator is used: the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}. This usually improves the bias of the estimator when \eqn{r} is close to zero. } There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \code{correction="translate"} is the Ohser-Stoyan translation correction, and \code{correction="isotropic"} or \code{"Ripley"} is Ripley's isotropic correction. The arguments \code{I} and \code{J} specify two subsets of the point pattern \code{X}. 
They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density}}. The default is the Epanechnikov kernel. The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Its precise interpretation is explained in the documentation for \code{\link{density.default}}. For the Epanechnikov kernel with support \eqn{[-h,h]}, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. If \code{bw} is not specified, the default bandwidth is determined by Stoyan's rule of thumb (Stoyan and Stoyan, 1994, page 285) applied to the points of type \code{j}. That is, \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the point process of type \code{j}, and \eqn{c} is a constant in the range from 0.1 to 0.2. The argument \code{stoyan} determines the value of \eqn{c}. } \value{ An object of class \code{"fv"}. } \seealso{ \code{\link{pcfcross}}, \code{\link{pcfdot}}, \code{\link{pcf.ppp}}. } \examples{ adult <- (marks(longleaf) >= 30) juvenile <- !adult p <- pcfmulti(longleaf, adult, juvenile) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/bw.CvL.adaptive.Rd0000644000176200001440000001060414643125461017532 0ustar liggesusers\name{bw.CvL.adaptive} \alias{bw.CvL.adaptive} \title{ Select Adaptive Bandwidth for Kernel Estimation Using Cronie-Van Lieshout Criterion } \description{ Uses the Cronie-Van Lieshout criterion to select the global smoothing bandwidth for adaptive kernel estimation of point process intensity. } \usage{ bw.CvL.adaptive(X, \dots, hrange = NULL, nh = 16, h=NULL, bwPilot = bw.scott.iso(X), edge = FALSE, diggle = TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{ Additional arguments passed to \code{\link{densityAdaptiveKernel.ppp}}. } \item{hrange}{ Optional numeric vector of length 2 giving the range of values of global bandwidth \code{h} to be searched. } \item{nh}{ Optional integer giving the number of values of bandwidth \code{h} to search. } \item{h}{ Optional. Vector of values of the bandwidth to be searched. Overrides the values of \code{nh} and \code{hrange}. } \item{bwPilot}{ Pilot bandwidth. A scalar value in the same units as the coordinates of \code{X}. The smoothing bandwidth for computing an initial estimate of intensity using \code{\link{density.ppp}}. } \item{edge}{ Logical value indicating whether to apply edge correction. } \item{diggle}{ Logical. If \code{TRUE}, use the Jones-Diggle improved edge correction, which is more accurate but slower to compute than the default correction. } } \details{ This function selects an appropriate value of global bandwidth \code{h0} for adaptive kernel estimation of the intensity function for the point pattern \code{X}. In adaptive estimation, each point in the point pattern is subjected to a different amount of smoothing, controlled by data-dependent or spatially-varying bandwidths. 
The global bandwidth \code{h0} is a scale factor which is used to adjust all of the data-dependent bandwidths according to the Abramson (1982) square-root rule. This function considers each candidate value of bandwidth \eqn{h}, performs the smoothing steps described above, extracts the adaptively-estimated intensity values \eqn{\hat\lambda(x_i)}{lambda(X[i])} at each data point \eqn{x_i}{X[i]}, and calculates the Cronie-Van Lieshout criterion \deqn{ \mbox{CvL}(h) = \sum_{i=1}^n \frac 1 {\hat\lambda(x_i)}. }{ CvL(h) = sum_[i=1,...n] 1/lambda(X[i]). } The value of \eqn{h} which minimises the squared difference \deqn{ LP2(h) = (CvL(h) - |W|)^2 } (where \code{|W|} is the area of the window of \code{X}) is selected as the optimal global bandwidth. Bandwidths \code{h} are physical distance values expressed in the same units as the coordinates of \code{X}. } \value{ A single numerical value giving the selected global bandwidth. The result also belongs to the class \code{"bw.optim"} (see \code{\link[spatstat.explore]{bw.optim.object}}) which can be plotted to show the bandwidth selection criterion as a function of \code{sigma}. } \references{ Abramson, I. (1982) On bandwidth variation in kernel estimates --- a square root law. \emph{Annals of Statistics}, \bold{10}(4), 1217-1223.\cr Cronie, O and Van Lieshout, M N M (2018) A non-model-based approach to bandwidth selection for kernel estimators of spatial intensity functions, \emph{Biometrika}, \bold{105}, 455-462. Van Lieshout, M.N.M. (2021) Infill asymptotics for adaptive kernel estimators of spatial intensity. \emph{Australian and New Zealand Journal of Statistics} \bold{63} (1) 159--181. } \author{ Marie-Colette Van Lieshout. Modified by \adrian. } \seealso{ \code{\link[spatstat.explore]{bw.optim.object}}. \code{\link{adaptive.density}}, \code{\link{densityAdaptiveKernel.ppp}}, \code{\link{bw.abram.ppp}}, \code{\link{density.ppp}}. To select a \emph{fixed} smoothing bandwidth using the Cronie-Van Lieshout criterion, use \code{\link{bw.CvL}}. } \examples{ online <- interactive() if(online) { h0 <- bw.CvL.adaptive(redwood3) } else { ## faster computation for package checker h0 <- bw.CvL.adaptive(redwood3, nh=8, hrange=c(1/4, 4) * bw.diggle(redwood3)) } plot(h0) plot(as.fv(h0), CvL ~ h) if(online) { Z <- densityAdaptiveKernel(redwood3, h0) plot(Z) } } \keyword{spatial} \keyword{methods} \keyword{smooth} \concept{Adaptive smoothing} \concept{Bandwidth selection} spatstat.explore/man/relriskHeat.Rd0000644000176200001440000000657014700375513017126 0ustar liggesusers\name{relriskHeat} \alias{relriskHeat} \alias{relriskHeat.ppp} \title{ Diffusion Estimate of Conditional Probabilities } \description{ Computes the conditional probability estimator of relative risk based on a multitype point pattern using the diffusion estimate of the type-specific intensities. } \usage{ relriskHeat(X, \dots) \method{relriskHeat}{ppp}(X, \dots, sigmaX=NULL, weights=NULL) } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link[spatstat.explore]{densityHeat}} controlling the estimation of each marginal intensity. } \item{sigmaX}{ Optional. Numeric vector of bandwidths, one associated with each data point in \code{X}. } \item{weights}{ Optional numeric vector of weights associated with each point of \code{X}. } } \details{ The function \code{relriskHeat} is generic. This file documents the method \code{relriskHeat.ppp} for spatial point patterns (objects of class \code{"ppp"}). 
This function estimates the spatially-varying conditional probability that a random point (given that it is present) will belong to a given type. The algorithm separates \code{X} into the sub-patterns consisting of points of each type. It then applies \code{\link[spatstat.explore]{densityHeat}} to each sub-pattern, using the same bandwidth and smoothing regimen for each sub-pattern, as specified by the arguments \code{\dots}. If \code{weights} is specified, it should be a numeric vector of length equal to the number of points in \code{X}, so that \code{weights[i]} is the weight for data point \code{X[i]}. Similarly when performing lagged-arrival smoothing, the argument \code{sigmaX} must be a numeric vector of the same length as the number of points in \code{X}, and thus contain the point-specific bandwidths in the order corresponding to each of these points regardless of mark. } \value{ A named list (of class \code{\link[spatstat.geom]{solist}}) containing pixel \code{\link[spatstat.geom]{im}}ages, giving the estimated conditional probability surfaces for each type. } \seealso{ \code{\link[spatstat.explore]{relrisk.ppp}} for the traditional convolution-based kernel estimator of conditional probability surfaces, and the function \code{risk} in the \pkg{sparr} package for the density-ratio-based estimator. } \references{ Agarwal, N. and Aluru, N.R. (2010) A data-driven stochastic collocation approach for uncertainty quantification in MEMS. \emph{International Journal for Numerical Methods in Engineering} \bold{83}, 575--597. Baddeley, A., Davies, T., Rakshit, S., Nair, G. and McSwiggan, G. (2022) Diffusion smoothing for spatial point patterns. \emph{Statistical Science} \bold{37}, 123--142. Barry, R.P. and McIntyre, J. (2011) Estimating animal densities and home range in regions with irregular boundaries and holes: a lattice-based alternative to the kernel density estimator. \emph{Ecological Modelling} \bold{222}, 1666--1672. Botev, Z.I. and Grotowski, J.F. and Kroese, D.P. (2010) Kernel density estimation via diffusion. \emph{Annals of Statistics} \bold{38}, 2916--2957. } \author{ \adrian and \tilman. } \examples{ ## bovine tuberculosis data X <- subset(btb, select=spoligotype) plot(X) P <- relriskHeat(X,sigma=9) plot(P) } \keyword{spatial} \keyword{smooth} spatstat.explore/man/bw.frac.Rd0000644000176200001440000000442514643125461016171 0ustar liggesusers\name{bw.frac} \alias{bw.frac} \title{ Bandwidth Selection Based on Window Geometry } \description{ Select a smoothing bandwidth for smoothing a point pattern, based only on the geometry of the spatial window. The bandwidth is a specified quantile of the distance between two independent random points in the window. } \usage{ bw.frac(X, \dots, f=1/4) } \arguments{ \item{X}{ A window (object of class \code{"owin"}) or point pattern (object of class \code{"ppp"}) or other data which can be converted to a window using \code{\link[spatstat.geom]{as.owin}}. } \item{\dots}{ Arguments passed to \code{\link{distcdf}}. } \item{f}{ Probability value (between 0 and 1) determining the quantile of the distribution. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. The bandwidth \eqn{\sigma}{\sigma} is computed as a quantile of the distance between two independent random points in the window. The default is the lower quartile of this distribution. 
If \eqn{F(r)} is the cumulative distribution function of the distance between two independent random points uniformly distributed in the window, then the value returned is the quantile with probability \eqn{f}. That is, the bandwidth is the value \eqn{r} such that \eqn{F(r) = f}. The cumulative distribution function \eqn{F(r)} is computed using \code{\link{distcdf}}. We then compute the smallest number \eqn{r} such that \eqn{F(r) \ge f}{F(r) >= f}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.frac"} which can be plotted to show the cumulative distribution function and the selected quantile. } \seealso{ For estimating point process intensity, see \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.ppl}}, \code{\link{bw.scott}}, \code{\link{bw.CvL}}. For other smoothing purposes, see \code{\link{bw.stoyan}}, \code{\link{bw.smoothppp}}, \code{\link{bw.relrisk}}. } \examples{ h <- bw.frac(letterR) h plot(h, main="bw.frac(letterR)") } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} \concept{Bandwidth selection} spatstat.explore/man/spatstat.explore-deprecated.Rd0000644000176200001440000000126714643125461022266 0ustar liggesusers\name{spatstat.explore-deprecated} \alias{spatstat.explore-deprecated} %DoNotExport \alias{evalCovar} \alias{which.max.im} \title{Deprecated spatstat.explore functions} \description{ Deprecated spatstat.explore functions. } \usage{ evalCovar(model, covariate, \dots) which.max.im(x) } \details{ These functions are deprecated, and will eventually be deleted from the \pkg{spatstat.explore} package. \code{which.max.im(x)} is replaced by \code{\link[spatstat.geom]{im.apply}(x, which.max)}. The internal function \code{evalCovar} is replaced by the internal function \code{spatialCovariateEvidence}. } \value{ \code{which.max.im} returns an integer. } \keyword{internal} spatstat.explore/man/Tstat.Rd0000644000176200001440000000563514643125461015746 0ustar liggesusers\name{Tstat} \alias{Tstat} \title{ Third order summary statistic } \description{ Computes the third order summary statistic \eqn{T(r)} of a spatial point pattern. } \usage{ Tstat(X, ..., r = NULL, rmax = NULL, correction = c("border", "translate"), ratio = FALSE, verbose=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{T(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link[spatstat.geom]{as.ppp}()}. } \item{\dots}{Ignored.} \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{T(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. } \item{rmax}{ Optional. Numeric. The maximum value of \eqn{r} for which \eqn{T(r)} should be estimated. } \item{correction}{ Optional. A character vector containing any selection of the options \code{"none"}, \code{"border"}, \code{"bord.modif"}, \code{"translate"}, \code{"translation"}, or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{verbose}{ Logical. If \code{TRUE}, an estimate of the computation time is printed.
} } \details{ This command calculates the third-order summary statistic \eqn{T(r)} for a spatial point pattern, defined by Schladitz and Baddeley (2000). The definition of \eqn{T(r)} is similar to the definition of Ripley's \eqn{K} function \eqn{K(r)}, except that \eqn{K(r)} counts pairs of points while \eqn{T(r)} counts triples of points. Essentially \eqn{T(r)} is a rescaled cumulative distribution function of the diameters of triangles in the point pattern. The diameter of a triangle is the length of its longest side. } \section{Computation time}{ If the number of points is large, the algorithm can take a very long time to inspect all possible triangles. A rough estimate of the total computation time will be printed at the beginning of the calculation. If this estimate seems very large, stop the calculation using the user interrupt signal, and call \code{Tstat} again, using \code{rmax} to restrict the range of \code{r} values, thus reducing the number of triangles to be inspected. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ Schladitz, K. and Baddeley, A. (2000) A third order point process characteristic. \emph{Scandinavian Journal of Statistics} \bold{27} (2000) 657--671. } \seealso{ \code{\link{Kest}} } \examples{ plot(Tstat(redwood)) } \author{\adrian} \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/PPversion.Rd0000644000176200001440000000555214611073323016570 0ustar liggesusers\name{PPversion} \alias{PPversion} \alias{QQversion} \title{ Transform a Function into its P-P or Q-Q Version } \description{ Given a function object \code{f} containing both the estimated and theoretical versions of a summary function, these operations combine the estimated and theoretical functions into a new function. When plotted, the new function gives either the P-P plot or Q-Q plot of the original \code{f}. } \usage{ PPversion(f, theo = "theo", columns = ".") QQversion(f, theo = "theo", columns = ".") } \arguments{ \item{f}{ The function to be transformed. An object of class \code{"fv"}. } \item{theo}{ The name of the column of \code{f} that should be treated as the theoretical value of the function. } \item{columns}{ Character vector, specifying the columns of \code{f} to which the transformation will be applied. Either a vector of names of columns of \code{f}, or one of the abbreviations recognised by \code{\link{fvnames}}. } } \details{ The argument \code{f} should be an object of class \code{"fv"}, containing both empirical estimates \eqn{\widehat f(r)}{fhat(r)} and a theoretical value \eqn{f_0(r)}{f0(r)} for a summary function. The \emph{P--P version} of \code{f} is the function \eqn{g(x) = \widehat f (f_0^{-1}(x))}{g(x) = fhat(f0^(-1)(x))} where \eqn{f_0^{-1}}{f0^(-1)} is the inverse function of \eqn{f_0}{f0}. A plot of \eqn{g(x)} against \eqn{x} is equivalent to a plot of \eqn{\widehat f(r)}{fhat(r)} against \eqn{f_0(r)}{f0(r)} for all \eqn{r}. If \code{f} is a cumulative distribution function (such as the result of \code{\link{Fest}} or \code{\link{Gest}}) then this is a P--P plot, a plot of the observed versus theoretical probabilities for the distribution. The diagonal line \eqn{y=x} corresponds to perfect agreement between observed and theoretical distribution. The \emph{Q--Q version} of \code{f} is the function \eqn{h(x) = f_0^{-1}(\widehat f(x))}{f0^(-1)(fhat(x))}.
If \code{f} is a cumulative distribution function, a plot of \eqn{h(x)} against \eqn{x} is a Q--Q plot, a plot of the observed versus theoretical quantiles of the distribution. The diagonal line \eqn{y=x} corresponds to perfect agreement between observed and theoretical distribution. Another straight line corresponds to the situation where the observed variable is a linear transformation of the theoretical variable. For a point pattern \code{X}, the Q--Q version of \code{Kest(X)} is essentially equivalent to \code{Lest(X)}. } \value{ Another object of class \code{"fv"}. } \author{ Tom Lawrence and Adrian Baddeley. Implemented by \spatstatAuthors. } \seealso{ \code{\link{plot.fv}} } \examples{ opa <- par(mar=0.1+c(5,5,4,2)) G <- Gest(redwoodfull) plot(PPversion(G)) plot(QQversion(G)) par(opa) } \keyword{spatial} \keyword{nonparametric} \keyword{manip} spatstat.explore/man/pool.envelope.Rd0000644000176200001440000000557514611073324017436 0ustar liggesusers\name{pool.envelope} \alias{pool.envelope} \title{ Pool Data from Several Envelopes } \description{ Pool the simulation data from several simulation envelopes (objects of class \code{"envelope"}) and compute a new envelope. } \usage{ \method{pool}{envelope}(..., savefuns=FALSE, savepatterns=FALSE) } \arguments{ \item{\dots}{ Objects of class \code{"envelope"}. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"envelope"} of simulation envelopes. It is used to combine the simulation data from several simulation envelopes and to compute an envelope based on the combined data. Each of the arguments \code{\dots} must be an object of class \code{"envelope"}. These envelopes must be compatible, in that they are envelopes for the same function, and were computed using the same options. \itemize{ \item In normal use, each envelope object will have been created by running the command \code{\link{envelope}} with the argument \code{savefuns=TRUE}. This ensures that each object contains the simulated data (summary function values for the simulated point patterns) that were used to construct the envelope. The simulated data are extracted from each object and combined. A new envelope is computed from the combined set of simulations. \item Alternatively, if each envelope object was created by running \code{\link{envelope}} with \code{VARIANCE=TRUE}, then the saved functions are not required. The sample means and sample variances from each envelope will be pooled. A new envelope is computed from the pooled mean and variance. } Warnings or errors will be issued if the envelope objects \code{\dots} appear to be incompatible. Apart from these basic checks, the code is not smart enough to decide whether it is sensible to pool the data. To modify the envelope parameters or the type of envelope that is computed, first pool the envelope data using \code{pool.envelope}, then use \code{\link{envelope.envelope}} to modify the envelope parameters. } \value{ An object of class \code{"envelope"}. 
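For example, pooled simulations can be re-used to construct a different type of envelope, as described above; a sketch assuming the \code{cells} data:
  E1 <- envelope(cells, Kest, nsim=10, savefuns=TRUE)
  E2 <- envelope(cells, Kest, nsim=10, savefuns=TRUE)
  E12 <- pool(E1, E2)
  Eg <- envelope(E12, global=TRUE)   # global envelope from the pooled simulations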
} \seealso{ \code{\link{envelope}}, \code{\link{envelope.envelope}}, \code{\link{pool}}, \code{\link{pool.fasp}} } \examples{ E1 <- envelope(cells, Kest, nsim=10, savefuns=TRUE) E2 <- envelope(cells, Kest, nsim=20, savefuns=TRUE) pool(E1, E2) V1 <- envelope(E1, VARIANCE=TRUE) V2 <- envelope(E2, VARIANCE=TRUE) pool(V1, V2) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} \concept{Goodness-of-fit} \concept{Test of randomness} \concept{Envelope of simulations} spatstat.explore/man/SpatialQuantile.ppp.Rd0000644000176200001440000001170214611073325020535 0ustar liggesusers\name{SpatialQuantile.ppp} \alias{SpatialQuantile.ppp} \title{ Spatially Weighted Quantile of Values at Points } \description{ Given a spatial point pattern with numeric marks, compute a weighted quantile of the mark values, with spatially-varying weights that depend on distance to the data points. } \usage{ \method{SpatialQuantile}{ppp}(X, prob = 0.5, sigma = NULL, \dots, type = 1, at = c("pixels", "points"), leaveoneout = TRUE, weights = NULL, edge = TRUE, diggle = FALSE, verbose = FALSE) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}) with numeric marks. } \item{prob}{ Probability for which the quantile is required. A single numeric value between 0 and 1. } \item{sigma}{ Smoothing bandwidth, passed to \code{\link{density.ppp}}. } \item{\dots}{ Further arguments passed to \code{\link{density.ppp}} controlling the spatial smoothing. } \item{type}{ Integer specifying the type of median (using the convention of \code{\link[stats]{quantile.default}}; see Details). Only types 1 and 4 are currently implemented. } \item{at}{ Character string indicating whether to compute the quantile at every pixel of a pixel image (\code{at="pixels"}, the default) or at every data point of \code{X} (\code{at="points"}). } \item{leaveoneout}{ Logical value indicating whether to compute a leave-one-out estimator. Applicable only when \code{at="points"}. } \item{weights}{ Optional vector of numeric weights attached to the points of \code{X}. } \item{edge,diggle}{ Arguments passed to \code{\link{density.ppp}} to determine the edge correction. } \item{verbose}{ Logical value specifying whether to print progress reports during the calculation. } } \details{ The argument \code{X} should be a spatial point pattern (object of class \code{"ppp"}) with numeric marks. The algorithm computes the weighted quantile of the mark values at each desired spatial location, using spatially-varying weights which depend on distance to the data points. Suppose the data points are at spatial locations \eqn{x_1,\ldots,x_n}{x[1], ..., x[n]} and have mark values \eqn{y_1,\ldots,y_n}{y[1], ..., y[n]}. For a query location \eqn{u}, the smoothed quantile is defined as the weighted quantile of the mark values \eqn{y_1,\ldots,y_n}{y[1], ..., y[n]} with weights \eqn{w_1(u),\ldots,w_n(u)}{w[1](u), ..., w[n](u)}, where \deqn{ w_i(u) = \frac{k(u,x_i)}{\sum_{j=1}^n k(u,x_j)} }{ w[i](u) = k(u,x[i])/(k(u, x[1]) + ... + k(u, x[n])) } where \eqn{k(u,v)} is the smoothing kernel with bandwidth \code{sigma}. If \code{at="points"} and \code{leaveoneout=TRUE}, then a leave-one-out calculation is performed, which means that when the query location is a data point \eqn{x_i}{x[i]}, the value at the data point is ignored, and the weighted quantile is computed from the values \eqn{y_j}{y[j]} for all \eqn{j} not equal to \eqn{i}. 
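To make the weighting scheme concrete, here is a rough sketch of the calculation at a single query location (100, 100), ignoring edge correction, with \code{prob=0.5} and \code{type=1}; the Gaussian bandwidth 30 is an arbitrary choice:
  X <- longleaf
  sigma <- 30
  d <- sqrt((X$x - 100)^2 + (X$y - 100)^2)
  w <- exp(-d^2/(2 * sigma^2))
  w <- w/sum(w)                                  # normalised kernel weights
  o <- order(marks(X))
  marks(X)[o][min(which(cumsum(w[o]) >= 0.5))]   # type-1 weighted median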
The calculation of the quantile value depends on the argument \code{type} which is interpreted in the same way as for \code{\link[stats]{quantile.default}}. Currently, only types 1 and 4 are implemented. If \code{type=1} (the default), the quantile value is one of the mark values (one of the values in \code{marks(x)}). If \code{type=4}, the quantile value is obtained by linearly interpolating between mark values. Note that the default values of \code{type} in \code{SpatialQuantile.ppp} and \code{\link{SpatialMedian.ppp}} are different. } \value{ \emph{If \code{X} has a single column of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a pixel image (object of class \code{"im"}). \item If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{X}. } \emph{If \code{X} has a data frame of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a named list of pixel images (object of class \code{"im"}). There is one image for each column of marks. This list also belongs to the class \code{"solist"}, for which there is a plot method. \item If \code{at="points"}, the result is a data frame with one row for each point of \code{X}, and one column for each column of marks. Entries are values of the interpolated function at the points of \code{X}. } The return value has attributes \code{"sigma"} and \code{"varcov"} which report the smoothing bandwidth that was used. } \author{ \adrian. } \seealso{ \code{\link{SpatialMedian.ppp}}, \code{\link{SpatialMedian}}. } \examples{ X <- longleaf if(!interactive()) { ## mark values rounded to nearest multiple of 10 to reduce check time marks(X) <- round(marks(X), -1) } Z <- SpatialQuantile(X, prob=0.25, sigma=30) ZX <- SpatialQuantile(X, prob=0.25, sigma=30, at="points") } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.explore/man/markconnect.Rd0000644000176200001440000001460614643125461017155 0ustar liggesusers\name{markconnect} \alias{markconnect} \title{ Mark Connection Function } \description{ Estimate the marked connection function of a multitype point pattern. } \usage{ markconnect(X, i, j, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, normalise=FALSE) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link[spatstat.geom]{as.ppp}}. } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the mark connection function \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to \code{\link{markcorr}}, or passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. 
} \item{normalise}{ If \code{TRUE}, normalise the pair connection function by dividing it by \eqn{p_i p_j}{p[i]*p[j]}, the estimated probability that randomly-selected points will have marks \eqn{i} and \eqn{j}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark connection function \eqn{p_{ij}(r)}{p[i,j](r)} has been estimated } \item{theo}{the theoretical value of \eqn{p_{ij}(r)}{p[i,j](r)} when the marks attached to different points are independent } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{p_{ij}(r)}{p[i,j](r)} obtained by the edge corrections named. } \details{ The mark connection function \eqn{p_{ij}(r)}{p[i,j](r)} of a multitype point process \eqn{X} is a measure of the dependence between the types of two points of the process a distance \eqn{r} apart. Informally \eqn{p_{ij}(r)}{p[i,j](r)} is defined as the conditional probability, given that there is a point of the process at a location \eqn{u} and another point of the process at a location \eqn{v} separated by a distance \eqn{||u-v|| = r}, that the first point is of type \eqn{i} and the second point is of type \eqn{j}. See Stoyan and Stoyan (1994). If the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{p_{ij}(r) \equiv p_i p_j}{p[i,j](r) = p[i]p[j]} where \eqn{p_i}{p[i]} denotes the probability that a point is of type \eqn{i}. Values larger than this, \eqn{p_{ij}(r) > p_i p_j}{p[i,j](r) > p[i]p[j]}, indicate positive association between the two types, while smaller values indicate negative association. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. It must be a multitype point pattern (a marked point pattern with factor-valued marks). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{p_{ij}(r)}{p[i,j](r)} is estimated. There is a sensible default. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented only for rectangular and polygonal windows (not for binary masks) and is slow for complicated polygons. } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries. } \item{none}{No edge correction.} } The option \code{correction="none"} should only be used if the number of data points is extremely large (otherwise an edge correction is needed to correct bias). Note that the estimator assumes the process is stationary (spatially homogeneous). The mark connection function is estimated using density estimation techniques. 
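  As a small illustration of the normalisation option described above (a minimal sketch only;
  the choice of dataset and types is arbitrary), the normalised estimate divides by
  \eqn{p_i p_j}{p[i]*p[j]}, so that independent marks correspond to the constant value 1.
  \preformatted{
  require(spatstat.explore)
  M  <- markconnect(amacrine, "on", "off")                  # raw estimate of p[i,j](r)
  Mn <- markconnect(amacrine, "on", "off", normalise=TRUE)  # divided by p[i]*p[j]
  plot(Mn)
  }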
The user can choose between \describe{ \item{\code{"density"}}{ which uses the standard kernel density estimation routine \code{\link{density}}, and works only for evenly-spaced \code{r} values; } \item{\code{"loess"}}{ which uses the function \code{loess} in the package \pkg{modreg}; } \item{\code{"sm"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is extremely slow; } \item{\code{"smrep"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is relatively fast, but may require manual control of the smoothing parameter \code{hmult}. } } } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ Multitype pair correlation \code{\link{pcfcross}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}}. Use \code{\link{alltypes}} to compute the mark connection functions between all pairs of types. Mark correlation \code{\link{markcorr}} and mark variogram \code{\link{markvario}} for numeric-valued marks. } \examples{ # Hughes' amacrine data # Cells marked as 'on'/'off' M <- markconnect(amacrine, "on", "off") plot(M) # Compute for all pairs of types at once plot(alltypes(amacrine, markconnect)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/envelopeArray.Rd0000644000176200001440000000564114611073324017457 0ustar liggesusers\name{envelopeArray} \alias{envelopeArray} \title{ Array of Simulation Envelopes of Summary Function } \description{ Compute an array of simulation envelopes using a summary function that returns an array of curves. } \usage{ envelopeArray(X, fun, \dots, dataname = NULL, verb = FALSE, reuse = TRUE) } \arguments{ \item{X}{ Object containing point pattern data. A point pattern (object of class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"lppm"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. The result of \code{fun} should be a function array (object of class \code{"fasp"}). } \item{\dots}{ Arguments passed to \code{\link{envelope}} to control the simulations, or passed to \code{fun} when evaluating the function. } \item{dataname}{ Optional character string name for the data. } \item{verb}{ Logical value indicating whether to print progress reports. } \item{reuse}{ Logical value indicating whether the envelopes in each panel should be based on the same set of simulated patterns (\code{reuse=TRUE}, the default) or on different, independent sets of simulated patterns (\code{reuse=FALSE}). } } \details{ This command is the counterpart of \code{\link{envelope}} when the function \code{fun} that is evaluated on each simulated point pattern will return an object of class \code{"fasp"} representing an array of summary functions. Simulated point patterns are generated according to the rules described for \code{\link{envelope}}. In brief, if \code{X} is a point pattern, the algorithm generates simulated point patterns of the same kind, according to complete spatial randomness. If \code{X} is a fitted model, the algorithm generates simulated point patterns according to this model. For each simulated point pattern \code{Y}, the function \code{fun} is invoked. The result \code{Z <- fun(Y, ...)} should be an object of class \code{"fasp"} representing an array of summary functions. 
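  For example, the following minimal sketch (assuming \code{\link{alltypes}} with its default
  \eqn{K} function as the array-valued summary, and a deliberately tiny number of simulations)
  produces one envelope panel for each pair of types.
  \preformatted{
  require(spatstat.explore)
  A <- envelopeArray(amacrine, alltypes, nsim=3)   # nsim kept small for illustration only
  plot(A)
  }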
The dimensions of the array \code{Z} should be the same for each simulated pattern \code{Y}. This algorithm finds the simulation envelope of the summary functions in each cell of the array. } \value{ An object of class \code{"fasp"} representing an array of envelopes. } \author{ \spatstatAuthors. } \seealso{ \code{\link{envelope}}, \code{\link{alltypes}}. } \examples{ if(interactive()) { Nsim <- 19 X <- finpines co <- "best" } else { ## smaller task to reduce check time Nsim <- 3 X <- finpines[c(FALSE, TRUE)] co <- "none" } A <- envelopeArray(X, markcrosscorr, nsim=Nsim, correction=co) plot(A) } \keyword{spatial} \keyword{nonparametric} \keyword{iteration} \concept{Goodness-of-fit} \concept{Test of randomness} \concept{Envelope of simulations} spatstat.explore/man/Jdot.inhom.Rd0000644000176200001440000000733514611073323016655 0ustar liggesusers\name{Jdot.inhom} \alias{Jdot.inhom} \title{ Inhomogeneous Multitype J function (i-to-any) } \description{ For a multitype point pattern, estimate the inhomogeneous multitype \eqn{J} function summarising the interpoint dependence between points of type \eqn{i} and points of any type. } \usage{ Jdot.inhom(X, i, lambdaI = NULL, lambdadot = NULL, lambdamin = NULL, \dots, r = NULL, ReferenceMeasureMarkSetI = NULL, ratio = FALSE) } \arguments{ \item{X}{ The observed point pattern, from which an estimate of the inhomogeneous multitype \eqn{J} function \eqn{J_{i\bullet}(r)}{Ji.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process of points of type \code{i}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{i} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdadot}{ Optional. Values of the estimated intensity of the point process. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdamin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{\dots}{ Extra arguments passed to \code{\link[spatstat.geom]{as.mask}} to control the pixel resolution for the computation. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{K} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{ReferenceMeasureMarkSetI}{ Optional. The total measure of the mark set. A positive number. } \item{ratio}{ Logical value indicating whether to save ratio information. } } \details{ This function is the counterpart of \code{\link{Jdot}} for inhomogeneous patterns. It is computed as a special case of \code{\link{Jmulti.inhom}}. } \value{ Object of class \code{"fv"} containing the estimate of the inhomogeneous multitype \eqn{J} function. 
} \references{ Cronie, O. and Van Lieshout, M.N.M. (2015) Summary statistics for inhomogeneous marked point processes. \emph{Annals of the Institute of Statistical Mathematics} DOI: 10.1007/s10463-015-0515-z } \author{ Jonatan \Gonzalez and \adrian. } \seealso{ \code{\link{Jdot.inhom}}, \code{\link{Jmulti.inhom}}, \code{\link{Jdot}}. } \examples{ X <- rescale(amacrine) if(interactive() && require(spatstat.model)) { ## how to do it normally mod <- ppm(X ~ marks * x) lam <- fitted(mod, dataonly=TRUE) lmin <- min(predict(mod)[["off"]]) * 0.9 dd <- NULL } else { ## for package testing lam <- intensity(X)[as.integer(marks(X))] lmin <- intensity(X)[2] * 0.9 dd <- 32 } lamI <- lam[marks(X) == "on"] JD <- Jdot.inhom(X, "on", lambdaI=lamI, lambdadot=lam, lambdamin=lmin, dimyx=dd) } spatstat.explore/man/fv.object.Rd0000644000176200001440000000321614611073324016517 0ustar liggesusers\name{fv.object} \alias{fv.object} %DoNotExport \title{Function Value Table} \description{ A class \code{"fv"} to support the convenient plotting of several estimates of the same function. } \details{ An object of this class is a convenient way of storing and plotting several different estimates of the same function. It is a data frame with extra attributes indicating the recommended way of plotting the function, and other information. There are methods for \code{print} and \code{plot} for this class. Objects of class \code{"fv"} are returned by \code{\link[spatstat.explore]{Fest}}, \code{\link[spatstat.explore]{Gest}},\code{\link[spatstat.explore]{Jest}}, and \code{\link[spatstat.explore]{Kest}} along with many other functions. } \seealso{ Objects of class \code{"fv"} are returned by \code{\link[spatstat.explore]{Fest}}, \code{\link[spatstat.explore]{Gest}},\code{\link[spatstat.explore]{Jest}}, and \code{\link[spatstat.explore]{Kest}} along with many other functions. See \code{\link{plot.fv}} for plotting an \code{"fv"} object. See \code{\link{as.function.fv}} to convert an \code{"fv"} object to an \R function. Use \code{\link{cbind.fv}} to combine several \code{"fv"} objects. Use \code{\link{bind.fv}} to glue additional columns onto an existing \code{"fv"} object. \emph{Undocumented} functions for modifying an \code{"fv"} object include \code{fvnames}, \code{fvnames<-}, \code{tweak.fv.entry} and \code{rebadge.fv}. } \examples{ K <- Kest(cells) class(K) K # prints a sensible summary plot(K) } \author{\adrian and \rolf } \keyword{spatial} \keyword{attribute} spatstat.explore/man/fryplot.Rd0000644000176200001440000001317214643125461016345 0ustar liggesusers\name{fryplot} \alias{fryplot} \alias{frypoints} \title{Fry Plot of Point Pattern} \description{ Displays the Fry plot (Patterson plot) of a spatial point pattern. } \usage{ fryplot(X, ..., width=NULL, from=NULL, to=NULL, axes=FALSE) frypoints(X, from=NULL, to=NULL, dmax=Inf) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) or something acceptable to \code{\link[spatstat.geom]{as.ppp}}. } \item{\dots}{Optional arguments to control the appearance of the plot.} \item{width}{Optional parameter indicating the width of a box for a zoomed-in view of the Fry plot near the origin.} \item{from,to}{ Optional. Subset indices specifying which points of \code{X} will be considered when forming the vectors (drawn from each point of \code{from}, to each point of \code{to}.) } \item{axes}{ Logical value indicating whether to draw axes, crossing at the origin. } \item{dmax}{ Maximum distance between points. Pairs at greater distances do not contribute to the result. 
The default means there is no maximum distance. } } \details{ The function \code{fryplot} generates a Fry plot (or Patterson plot); \code{frypoints} returns the points of the Fry plot as a point pattern dataset. Fry (1979) and Hanna and Fry (1979) introduced a manual graphical method for investigating features of a spatial point pattern of mineral deposits. A transparent sheet, marked with an origin or centre point, is placed over the point pattern. The transparent sheet is shifted so that the origin lies over one of the data points, and the positions of all the \emph{other} data points are copied onto the transparent sheet. This procedure is repeated for each data point in turn. The resulting plot (the Fry plot) is a pattern of \eqn{n(n-1)} points, where \eqn{n} is the original number of data points. This procedure was previously proposed by Patterson (1934, 1935) for studying inter-atomic distances in crystals, and is also known as a Patterson plot. The function \code{fryplot} generates the Fry/Patterson plot. Standard graphical parameters such as \code{main}, \code{pch}, \code{lwd}, \code{col}, \code{bg}, \code{cex} can be used to control the appearance of the plot. To zoom in (to view only a subset of the Fry plot at higher magnification), use the argument \code{width} to specify the width of a rectangular field of view centred at the origin, or the standard graphical arguments \code{xlim} and \code{ylim} to specify another rectangular field of view. (The actual field of view may be slightly larger, depending on the graphics device.) The function \code{frypoints} returns the points of the Fry plot as a point pattern object. There may be a large number of points in this pattern, so this function should be used only if further analysis of the Fry plot is required. Fry plots are particularly useful for recognising anisotropy in regular point patterns. A void around the origin in the Fry plot suggests regularity (inhibition between points) and the shape of the void gives a clue to anisotropy in the pattern. Fry plots are also useful for detecting periodicity or rounding of the spatial coordinates. In mathematical terms, the Fry plot of a point pattern \code{X} is simply a plot of the vectors \code{X[i] - X[j]} connecting all pairs of distinct points in \code{X}. The Fry plot is related to the \eqn{K} function (see \code{\link{Kest}}) and the reduced second moment measure (see \code{\link{Kmeasure}}). For example, the number of points in the Fry plot lying within a circle of given radius is an unnormalised and uncorrected version of the \eqn{K} function. The Fry plot has a similar appearance to the plot of the reduced second moment measure \code{\link{Kmeasure}} when the smoothing parameter \code{sigma} is very small. The Fry plot does not adjust for the effect of the size and shape of the sampling window. The density of points in the Fry plot tapers off near the edges of the plot. This is an edge effect, a consequence of the bounded sampling window. In geological applications this is usually not important, because interest is focused on the behaviour near the origin where edge effects can be ignored. To correct for the edge effect, use \code{\link{Kmeasure}} or \code{\link{Kest}} or its relatives. } \value{ \code{fryplot} returns \code{NULL}. \code{frypoints} returns a point pattern (object of class \code{"ppp"}). } \references{ Fry, N. (1979) Random point distributions and strain measurement in rocks. \emph{Tectonophysics} \bold{60}, 89--105. Hanna, S.S. and Fry, N. 
(1979) A comparison of methods of strain determination in rocks from southwest Dyfed (Pembrokeshire) and adjacent areas. \emph{Journal of Structural Geology} \bold{1}, 155--162. Patterson, A.L. (1934) A Fourier series method for the determination of the component of inter-atomic distances in crystals. \emph{Physics Reviews} \bold{46}, 372--376. Patterson, A.L. (1935) A direct method for the determination of the components of inter-atomic distances in crystals. \emph{Zeitschrift fuer Krystallographie} \bold{90}, 517--554. } \seealso{ \code{\link{Kmeasure}}, \code{\link{Kest}} } \examples{ ## unmarked data fryplot(cells) Y <- frypoints(cells) ## numerical marks fryplot(longleaf, width=4, axes=TRUE) ## multitype points fryplot(amacrine, width=0.2, from=(marks(amacrine) == "on"), chars=c(3,16), cols=2:3, main="Fry plot centred at an On-cell") points(0,0) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/nndensity.Rd0000644000176200001440000000641314643125461016661 0ustar liggesusers\name{nndensity.ppp} \alias{nndensity} \alias{nndensity.ppp} \title{ Estimate Intensity of Point Pattern Using Nearest Neighbour Distances } \description{ Estimates the intensity of a point pattern using the distance from each spatial location to the \code{k}th nearest data point. } \usage{ nndensity(x, ...) \method{nndensity}{ppp}(x, k, ..., verbose = TRUE) } \arguments{ \item{x}{ A point pattern (object of class \code{"ppp"}) or some other spatial object. } \item{k}{ Integer. The distance to the \code{k}th nearest data point will be computed. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link[spatstat.geom]{nnmap}} and \code{\link[spatstat.geom]{as.mask}} controlling the pixel resolution. } \item{verbose}{ Logical. If \code{TRUE}, print the value of \code{k} when it is automatically selected. If \code{FALSE}, remain silent. } } \details{ This function computes a quick estimate of the intensity of the point process that generated the point pattern \code{x}. For each spatial location \eqn{s}, let \eqn{d(s)} be the distance from \eqn{s} to the \eqn{k}-th nearest point in the dataset \code{x}. If the data came from a homogeneous Poisson process with intensity \eqn{\lambda}{lambda}, then \eqn{\pi d(s)^2}{pi * d(s)^2} would follow a negative exponential distribution with mean \eqn{1/\lambda}{1/lambda}, and the maximum likelihood estimate of \eqn{\lambda}{lambda} would be \eqn{1/(\pi d(s)^2)}{1/(pi * d(s)^2)}. This is the estimate computed by \code{nndensity}, apart from an edge effect correction. See Cressie (1991, equation (8.5.14), p. 654) and Silverman (1986, p. 96). This estimator of intensity is relatively fast to compute, and is spatially adaptive (so that it can handle wide variation in the intensity function). However, it implicitly assumes the points are independent, so it does not perform well if the pattern is strongly clustered or strongly inhibited. In normal use, the value of \code{k} should be at least 3. (Theoretically the estimator has infinite expected value if \eqn{k=1}, and infinite variance if \eqn{k=2}. The computed intensity estimate will have infinite peaks around each data point if \code{k = 1}.) The default value of \code{k} is the square root of the number of points in \code{x}, which seems to work well in many cases. The window of \code{x} is digitised using \code{\link[spatstat.geom]{as.mask}} and the values \eqn{d(s)} are computed using \code{\link[spatstat.geom]{nnmap}}. 
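  For illustration only, a simple uncorrected version of this estimator can be assembled by
  hand from the image of \eqn{k}-th nearest neighbour distances (a minimal sketch assuming
  \code{k = 5} and the common uncorrected form \eqn{k/(\pi d(s)^2)}{k/(pi * d(s)^2)};
  \code{nndensity} itself applies an edge correction, so its results will differ,
  especially near the window boundary).
  \preformatted{
  require(spatstat.explore)
  k <- 5
  D <- nnmap(swedishpines, k=k, what="dist")   # image of d(s), distance to k-th nearest data point
  lamhat <- eval.im(k/(pi * D^2))              # naive, uncorrected intensity estimate
  plot(lamhat)
  }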
To control the pixel resolution, see \code{\link[spatstat.geom]{as.mask}}. } \value{ A pixel image (object of class \code{"im"}) giving the estimated intensity of the point process at each spatial location. Pixel values are intensities (number of points per unit area). } \references{ Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons, New York. Silverman, B.W. (1986) \emph{Density Estimation}. Chapman and Hall, New York. } \seealso{ \code{\link{density.ppp}}, \code{\link[spatstat.geom]{intensity.ppp}} for alternative estimates of point process intensity. } \examples{ plot(nndensity(swedishpines)) } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.explore/man/rhohat.Rd0000644000176200001440000005500614650323373016135 0ustar liggesusers\name{rhohat} \alias{rhohat} \alias{rhohat.ppp} \alias{rhohat.quad} \concept{Resource Selection Function} \concept{Prospectivity} \title{ Nonparametric Estimate of Intensity as Function of a Covariate } \description{ Computes a nonparametric estimate of the intensity of a point process, as a function of a (continuous) spatial covariate. } \usage{ rhohat(object, covariate, ...) \method{rhohat}{ppp}(object, covariate, ..., baseline=NULL, weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "mountain", "valley", "piecewise"), subset=NULL, do.CI=TRUE, jitter=TRUE, jitterfactor=1, interpolate=TRUE, dimyx=NULL, eps=NULL, rule.eps = c("adjust.eps", "grow.frame", "shrink.frame"), n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95, positiveCI, breaks=NULL) \method{rhohat}{quad}(object, covariate, ..., baseline=NULL, weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "mountain", "valley", "piecewise"), subset=NULL, do.CI=TRUE, jitter=TRUE, jitterfactor=1, interpolate=TRUE, dimyx=NULL, eps=NULL, rule.eps = c("adjust.eps", "grow.frame", "shrink.frame"), n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95, positiveCI, breaks=NULL) } \arguments{ \item{object}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}), a quadrature scheme (object of class \code{"quad"}) or a fitted point process model (object of class \code{"ppm"}, \code{"slrm"} or \code{"lppm"}). } \item{covariate}{ Either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) providing the values of the covariate at any location. Alternatively one of the strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. } \item{weights}{ Optional weights attached to the data points. Either a numeric vector of weights for each data point, or a pixel image (object of class \code{"im"}) or a \code{function(x,y)} providing the weights. } \item{baseline}{ Optional baseline for intensity function. A \code{function(x,y)} or a pixel image (object of class \code{"im"}) providing the values of the baseline at any location. } \item{method}{ Character string determining the estimation method. See Details. } \item{horvitz}{ Logical value indicating whether to use Horvitz-Thompson weights. See Details. } \item{smoother}{ Character string determining the smoothing algorithm and the type of curve that will be estimated. See Details. } \item{subset}{ Optional. A spatial window (object of class \code{"owin"}) specifying a subset of the data, from which the estimate should be calculated. 
} \item{do.CI}{ Logical value specifying whether to calculate standard errors and confidence bands. } \item{jitter}{ Logical value. If \code{jitter=TRUE} (the default), the values of the covariate at the data points will be jittered (randomly perturbed by adding a small amount of noise) using the function \code{\link[base]{jitter}}. If \code{jitter=FALSE}, the covariate values at the data points will not be altered. See the section on \emph{Randomisation and discretisation}. } \item{jitterfactor}{ Numeric value controlling the scale of noise added to the covariate values at the data points when \code{jitter=TRUE}. Passed to the function \code{\link[base]{jitter}} as the argument \code{factor}. } \item{interpolate}{ Logical value specifying whether to use spatial interpolation to obtain the values of the covariate at the data points, when the covariate is a pixel image (object of class \code{"im"}). If \code{interpolate=FALSE}, the covariate value for each data point is simply the value of the covariate image at the pixel centre that is nearest to the data point. If \code{interpolate=TRUE}, the covariate value for each data point is obtained by interpolating the nearest pixel values using \code{\link[spatstat.geom]{interp.im}}. } \item{dimyx,eps,rule.eps}{ Arguments controlling the pixel resolution at which the covariate will be evaluated. See Details. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link{density.default}}). } \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link{density.default}}). } \item{n, from, to}{ Arguments passed to \code{\link{density.default}} to control the number and range of values at which the function will be estimated. } \item{bwref}{ Optional. An alternative value of \code{bw} to use when smoothing the reference density (the density of the covariate values observed at all locations in the window). } \item{\dots}{ Additional arguments passed to \code{\link{density.default}} or \code{\link[locfit]{locfit}}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } \item{confidence}{ Confidence level for confidence intervals. A number between 0 and 1. } \item{positiveCI}{ Logical value. If \code{TRUE}, confidence limits are always positive numbers; if \code{FALSE}, the lower limit of the confidence interval may sometimes be negative. Default is \code{FALSE} if \code{smoother="kernel"} and \code{TRUE} if \code{smoother="local"}. See Details. } \item{breaks}{ Breakpoints for the piecewise-constant function computed when \code{smoother='piecewise'}. Either a vector of numeric values specifying the breakpoints, or a single integer specifying the number of equally-spaced breakpoints. There is a sensible default. } } \details{ This command estimates the relationship between point process intensity and a given spatial covariate. Such a relationship is sometimes called a \emph{resource selection function} (if the points are organisms and the covariate is a descriptor of habitat) or a \emph{prospectivity index} (if the points are mineral deposits and the covariate is a geological variable). This command uses nonparametric methods which do not assume a particular form for the relationship. 
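  A minimal sketch of the basic workflow, using an artificial example in which the true
  intensity depends only on the covariate (the constants are arbitrary):
  \preformatted{
  require(spatstat.explore)
  Z <- as.im(function(x,y) { x }, owin())          # covariate: the x coordinate
  X <- rpoispp(function(x,y) { 50 * exp(2 * x) })  # intensity is a function of Z only
  rho <- rhohat(X, Z)                              # kernel estimate of rho(z) with confidence band
  plot(rho)
  }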
If \code{object} is a point pattern, and \code{baseline} is missing or null, this command assumes that \code{object} is a realisation of a point process with intensity function \eqn{\lambda(u)}{lambda(u)} of the form \deqn{\lambda(u) = \rho(Z(u))}{lambda(u) = rho(Z(u))} where \eqn{Z} is the spatial covariate function given by \code{covariate}, and \eqn{\rho(z)}{rho(z)} is the resource selection function or prospectivity index. A nonparametric estimator of the function \eqn{\rho(z)}{rho(z)} is computed. If \code{object} is a point pattern, and \code{baseline} is given, then the intensity function is assumed to be \deqn{\lambda(u) = \rho(Z(u)) B(u)}{lambda(u) = rho(Z(u)) * B(u)} where \eqn{B(u)} is the baseline intensity at location \eqn{u}. A nonparametric estimator of the relative intensity \eqn{\rho(z)}{rho(z)} is computed. If \code{object} is a fitted point process model, suppose \code{X} is the original data point pattern to which the model was fitted. Then this command assumes \code{X} is a realisation of a Poisson point process with intensity function of the form \deqn{ \lambda(u) = \rho(Z(u)) \kappa(u) }{ lambda(u) = rho(Z(u)) * kappa(u) } where \eqn{\kappa(u)}{kappa(u)} is the intensity of the fitted model \code{object}. A nonparametric estimator of the relative intensity \eqn{\rho(z)}{rho(z)} is computed. The nonparametric estimation procedure is controlled by the arguments \code{smoother}, \code{method} and \code{horvitz}. The argument \code{smoother} selects the type of estimation technique. \itemize{ \item If \code{smoother="kernel"} (the default), the nonparametric estimator is a \emph{kernel smoothing estimator} of \eqn{\rho(z)}{rho(z)} (Guan, 2008; Baddeley et al, 2012). The estimated function \eqn{\rho(z)}{rho(z)} will be a smooth function of \eqn{z} which takes nonnegative values. If \code{do.CI=TRUE} (the default), confidence bands are also computed, assuming a Poisson point process. See the section on \emph{Smooth estimates}. \item If \code{smoother="local"}, the nonparametric estimator is a \emph{local regression estimator} of \eqn{\rho(z)}{rho(z)} (Baddeley et al, 2012) obtained using local likelihood. The estimated function \eqn{\rho(z)}{rho(z)} will be a smooth function of \eqn{z}. If \code{do.CI=TRUE} (the default), confidence bands are also computed, assuming a Poisson point process. See the section on \emph{Smooth estimates}. \item If \code{smoother="increasing"}, we assume that \eqn{\rho(z)}{rho(z)} is an increasing function of \eqn{z}, and use the \emph{nonparametric maximum likelihood estimator} of \eqn{\rho(z)}{rho(z)} described by Sager (1982). The estimated function will be a step function, that is increasing as a function of \eqn{z}. Confidence bands are not computed. See the section on \emph{Monotone estimates}. \item If \code{smoother="decreasing"}, we assume that \eqn{\rho(z)}{rho(z)} is a decreasing function of \eqn{z}, and use the \emph{nonparametric maximum likelihood estimator} of \eqn{\rho(z)}{rho(z)} described by Sager (1982). The estimated function will be a step function, that is decreasing as a function of \eqn{z}. Confidence bands are not computed. See the section on \emph{Monotone estimates}. \item If \code{smoother="mountain"}, we assume that \eqn{\rho(z)}{rho(z)} is a function with an inverted U shape, with a single peak at a value \eqn{z_0}{z0}, so that \eqn{\rho(z)}{rho(z)} is an increasing function of \eqn{z} for \eqn{z < z_0}{z < z0} and a decreasing function of \eqn{z} for \eqn{z > z_0}{z > z0}. 
We compute the \emph{nonparametric maximum likelihood estimator}. The estimated function will be a step function, which is increasing and then decreasing as a function of \eqn{z}. Confidence bands are not computed. See the section on \emph{Unimodal estimates}. \item If \code{smoother="valley"}, we assume that \eqn{\rho(z)}{rho(z)} is a function with a U shape, with a single minimum at a value \eqn{z_0}{z0}, so that \eqn{\rho(z)}{rho(z)} is a decreasing function of \eqn{z} for \eqn{z < z_0}{z < z0} and an increasing function of \eqn{z} for \eqn{z > z_0}{z > z0}. We compute the \emph{nonparametric maximum likelihood estimator}. The estimated function will be a step function, which is decreasing and then increasing as a function of \eqn{z}. Confidence bands are not computed. See the section on \emph{Unimodal estimates}. \item If \code{smoother="piecewise"}, the estimate of \eqn{\rho(z)}{rho(z)} is piecewise constant. The range of covariate values is divided into several intervals (ranges or bands). The endpoints of these intervals are the breakpoints, which may be specified by the argument \code{breaks}; there is a sensible default. The estimate of \eqn{\rho(z)}{rho(z)} takes a constant value on each interval. The estimate of \eqn{\rho(z)}{rho(z)} in each interval of covariate values is simply the average intensity (number of points per unit area) in the relevant sub-region. If \code{do.CI=TRUE} (the default), confidence bands are computed assuming a Poisson process. } See Baddeley (2018) for a comparison of these estimation techniques (except for \code{"mountain"} and \code{"valley"}). If the argument \code{weights} is present, then the contribution from each data point \code{X[i]} to the estimate of \eqn{\rho}{rho} is multiplied by \code{weights[i]}. If the argument \code{subset} is present, then the calculations are performed using only the data inside this spatial region. This technique assumes that \code{covariate} has continuous values. It is not applicable to covariates with categorical (factor) values or discrete values such as small integers. For a categorical covariate, use \code{\link[spatstat.geom]{intensity.quadratcount}} applied to the result of \code{\link[spatstat.geom]{quadratcount}(X, tess=covariate)}. The argument \code{covariate} should be a pixel image, or a function, or one of the strings \code{"x"} or \code{"y"} signifying the cartesian coordinates. It will be evaluated on a fine grid of locations, with spatial resolution controlled by the arguments \code{dimyx,eps,rule.eps} which are passed to \code{\link[spatstat.geom]{as.mask}}. } \section{Smooth estimates}{ Smooth estimators of \eqn{\rho(z)}{rho(z)} were proposed by Baddeley and Turner (2005) and Baddeley et al (2012). Similar estimators were proposed by Guan (2008) and in the literature on relative distributions (Handcock and Morris, 1999). The estimated function \eqn{\rho(z)}{rho(z)} will be a smooth function of \eqn{z}. The smooth estimation procedure involves computing several density estimates and combining them. The algorithm used to compute density estimates is determined by \code{smoother}: \itemize{ \item If \code{smoother="kernel"}, the smoothing procedure is based on fixed-bandwidth kernel density estimation, performed by \code{\link{density.default}}. \item If \code{smoother="local"}, the smoothing procedure is based on local likelihood density estimation, performed by \code{\link[locfit]{locfit}}. 
} The argument \code{method} determines how the density estimates will be combined to obtain an estimate of \eqn{\rho(z)}{rho(z)}: \itemize{ \item If \code{method="ratio"}, then \eqn{\rho(z)}{rho(z)} is estimated by the ratio of two density estimates, The numerator is a (rescaled) density estimate obtained by smoothing the values \eqn{Z(y_i)}{Z(y[i])} of the covariate \eqn{Z} observed at the data points \eqn{y_i}{y[i]}. The denominator is a density estimate of the reference distribution of \eqn{Z}. See Baddeley et al (2012), equation (8). This is similar but not identical to an estimator proposed by Guan (2008). \item If \code{method="reweight"}, then \eqn{\rho(z)}{rho(z)} is estimated by applying density estimation to the values \eqn{Z(y_i)}{Z(y[i])} of the covariate \eqn{Z} observed at the data points \eqn{y_i}{y[i]}, with weights inversely proportional to the reference density of \eqn{Z}. See Baddeley et al (2012), equation (9). \item If \code{method="transform"}, the smoothing method is variable-bandwidth kernel smoothing, implemented by applying the Probability Integral Transform to the covariate values, yielding values in the range 0 to 1, then applying edge-corrected density estimation on the interval \eqn{[0,1]}, and back-transforming. See Baddeley et al (2012), equation (10). } If \code{horvitz=TRUE}, then the calculations described above are modified by using Horvitz-Thompson weighting. The contribution to the numerator from each data point is weighted by the reciprocal of the baseline value or fitted intensity value at that data point; and a corresponding adjustment is made to the denominator. Pointwise confidence intervals for the true value of \eqn{\rho(z)} are also calculated for each \eqn{z}, and will be plotted as grey shading. The confidence intervals are derived using the central limit theorem, based on variance calculations which assume a Poisson point process. If \code{positiveCI=FALSE}, the lower limit of the confidence interval may sometimes be negative, because the confidence intervals are based on a normal approximation to the estimate of \eqn{\rho(z)}. If \code{positiveCI=TRUE}, the confidence limits are always positive, because the confidence interval is based on a normal approximation to the estimate of \eqn{\log(\rho(z))}{log(\rho(z))}. For consistency with earlier versions, the default is \code{positiveCI=FALSE} for \code{smoother="kernel"} and \code{positiveCI=TRUE} for \code{smoother="local"}. } \section{Monotone estimates}{ The nonparametric maximum likelihood estimator of a monotone function \eqn{\rho(z)}{rho(z)} was described by Sager (1982). This method assumes that \eqn{\rho(z)}{rho(z)} is either an increasing function of \eqn{z}, or a decreasing function of \eqn{z}. The estimated function will be a step function, increasing or decreasing as a function of \eqn{z}. This estimator is chosen by specifying \code{smoother="increasing"} or \code{smoother="decreasing"}. The argument \code{method} is ignored this case. To compute the estimate of \eqn{\rho(z)}{rho(z)}, the algorithm first computes several primitive step-function estimates, and then takes the maximum of these primitive functions. If \code{smoother="decreasing"}, each primitive step function takes the form \eqn{\rho(z) = \lambda}{rho(z) = lambda} when \eqn{z \le t}, and \eqn{\rho(z) = 0}{rho(z) = 0} when \eqn{z > t}, where and \eqn{\lambda}{lambda} is a primitive estimate of intensity based on the data for \eqn{Z \le t}{Z <= t}. 
The jump location \eqn{t} will be the value of the covariate \eqn{Z} at one of the data points. The primitive estimate \eqn{\lambda}{lambda} is the average intensity (number of points divided by area) for the region of space where the covariate value is less than or equal to \eqn{t}. If \code{horvitz=TRUE}, then the calculations described above are modified by using Horvitz-Thompson weighting. The contribution to the numerator from each data point is weighted by the reciprocal of the baseline value or fitted intensity value at that data point; and a corresponding adjustment is made to the denominator. Confidence intervals are not available for the monotone estimators. } \section{Unimodal estimators}{ If \code{smoother="valley"} then we estimate a U-shaped function. A function \eqn{\rho(z)}{rho(z)} is U-shaped if it is decreasing when \eqn{z < z_0}{z < z0} and increasing when \eqn{z > z_0}{z > z0}, where \eqn{z_0}{z0} is called the critical value. The nonparametric maximum likelihood estimate of such a function can be computed by profiling over \eqn{z_0}{z0}. The algorithm considers all possible candidate values of the critical value \eqn{z_0}{z0}, and estimates the function \eqn{\rho(z)}{rho(z)} separately on the left and right of \eqn{z_0}{z0} using the monotone estimators described above. These function estimates are combined into a single function, and the Poisson point process likelihood is computed. The optimal value of \eqn{z_0}{z0} is the one which maximises the Poisson point process likelihood. If \code{smoother="mountain"} then we estimate a function which has an inverted U shape. A function \eqn{\rho(z)}{rho(z)} is inverted-U-shaped if it is increasing when \eqn{z < z_0}{z < z0} and decreasing when \eqn{z > z_0}{z > z0}. The nonparametric maximum likelihood estimate of such a function can be computed by profiling over \eqn{z_0}{z0} using the same technique \emph{mutatis mutandis}. Confidence intervals are not available for the unimodal estimators. } \section{Randomisation}{ By default, \code{rhohat} adds a small amount of random noise to the data. This is designed to suppress the effects of discretisation in pixel images. This strategy means that \code{rhohat} does not produce exactly the same result when the computation is repeated. If you need the results to be exactly reproducible, set \code{jitter=FALSE}. By default, the values of the covariate at the data points will be randomly perturbed by adding a small amount of noise using the function \code{\link[base]{jitter}}. To reduce this effect, set \code{jitterfactor} to a number smaller than 1. To suppress this effect entirely, set \code{jitter=FALSE}. } \value{ A function value table (object of class \code{"fv"}) containing the estimated values of \eqn{\rho}{rho} (and confidence limits) for a sequence of values of \eqn{Z}. Also belongs to the class \code{"rhohat"} which has special methods for \code{print}, \code{plot} and \code{predict}. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2012) Nonparametric estimation of the dependence of a point process on spatial covariates. \emph{Statistics and Its Interface} \bold{5} (2), 221--236. Baddeley, A. and Turner, R. (2005) Modelling spatial point patterns in R. In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan, editors, \emph{Case Studies in Spatial Point Pattern Modelling}, Lecture Notes in Statistics number 185. Pages 23--74. Springer-Verlag, New York, 2006. ISBN: 0-387-28311-0. Baddeley, A. 
(2018) A statistical commentary on mineral prospectivity analysis. Chapter 2, pages 25--65 in \emph{Handbook of Mathematical Geosciences: Fifty Years of IAMG}, edited by B.S. Daya Sagar, Q. Cheng and F.P. Agterberg. Springer, Berlin. Guan, Y. (2008) On consistent nonparametric intensity estimation for inhomogeneous spatial point processes. \emph{Journal of the American Statistical Association} \bold{103}, 1238--1247. Handcock, M.S. and Morris, M. (1999) \emph{Relative Distribution Methods in the Social Sciences}. Springer, New York. Sager, T.W. (1982) Nonparametric maximum likelihood estimation of spatial patterns. \emph{Annals of Statistics} \bold{10}, 1125--1136. } \author{ Smoothing algorithm by \adrian, Ya-Mei Chang, Yong Song, and \rolf. Nonparametric maximum likelihood algorithm by \adrian. } \seealso{ \code{\link[spatstat.explore]{rho2hat}}, \code{\link[spatstat.explore]{methods.rhohat}}, \code{\link[spatstat.model]{parres}}. See \code{\link[spatstat.model]{ppm}} for a parametric method for the same problem. } \examples{ X <- rpoispp(function(x,y){exp(3+3*x)}) rho <- rhohat(X, "x") rho <- rhohat(X, function(x,y){x}) plot(rho) curve(exp(3+3*x), lty=3, col=4, lwd=2, add=TRUE) rhoB <- rhohat(X, "x", method="reweight") rhoC <- rhohat(X, "x", method="transform") rhoI <- rhohat(X, "x", smoother="increasing") rhoM <- rhohat(X, "x", smoother="mountain") plot(rhoI, add=TRUE, .y ~ .x, col=6) legend("top", lty=c(3, 1), col=c(4, 6), lwd=c(2, 1), legend=c("true", "increasing")) \testonly{rh <- rhohat(X, "x", dimyx=32)} } \keyword{spatial} \keyword{models} \keyword{nonparametric} spatstat.explore/man/plot.studpermutest.Rd0000644000176200001440000000721714611073324020551 0ustar liggesusers\name{plot.studpermutest} \alias{plot.studpermutest} \title{ Plot a Studentised Permutation Test } \description{ Plot the result of the studentised permutation test. } \usage{ \method{plot}{studpermutest}(x, fmla, \dots, lty = NULL, col = NULL, lwd = NULL, lty.theo = NULL, col.theo = NULL, lwd.theo = NULL, lwd.mean = if (meanonly) 1 else NULL, lty.mean = lty, col.mean = col, separately = FALSE, meanonly = FALSE, main = if (meanonly) "group means" else NULL, xlim = NULL, ylim = NULL, ylab = NULL, legend = !add, legendpos = "topleft", lbox = FALSE, add = FALSE) } \arguments{ \item{x}{ An object of class \code{"studpermutest"} generated by \code{\link{studpermu.test}} and representing the result of a studentised permutation test for spatial point pattern data. } \item{fmla}{ Plot formula used in \code{\link{plot.fv}}. } \item{\dots}{ Additional graphical arguments passed to \code{\link{plot.fv}}. } \item{lty,col,lwd}{ Line type, colour, and line width of the curves plotting the summary function for each point pattern in the original data. Either a single value or a vector of length equal to the number of point patterns. } \item{lty.theo,col.theo,lwd.theo}{ Line type, colour, and line width of the curve representing the theoretical value of the summary function. } \item{lty.mean,col.mean,lwd.mean}{ Line type, colour, and line width (as a multiple of \code{lwd}) of the curve representing the group mean of the summary function. } \item{separately}{ Logical value indicating whether to plot each group of data in a separate panel. } \item{meanonly}{ Logical value indicating whether to plot only the group means of the summary function. } \item{main}{ Character string giving a main title for the plot. 
} \item{xlim,ylim}{ Numeric vectors of length 2 giving the limits for the \eqn{x} and \eqn{y} coordinates of the plot or plots. } \item{ylab}{ Character string or expression to be used for the label on the \eqn{y} axis. } \item{legend}{ Logical value indicating whether to plot a legend explaining the meaning of each curve. } \item{legendpos}{ Position of legend. See \code{\link{plot.fv}}. } \item{lbox}{ Logical value indicating whether to plot a box around the plot. } \item{add}{ Logical value indicating whether the plot should be added to the existing plot (\code{add=TRUE}) or whether a new frame should be created (\code{add=FALSE}, the default). } } \details{ This is the \code{plot} method for objects of class \code{"studpermutest"} which represent the result of a studentised permutation test applied to several point patterns. The test is performed by \code{\link{studpermu.test}}. The plot shows the summary functions for each point pattern, coloured according to group. Optionally it can show the different groups in separate plot panels, or show only the group means in a single panel. } \value{ Null. } \author{ Ute Hahn. Modified for \code{spatstat} by \spatstatAuthors. } \seealso{ \code{\link{studpermu.test}} } \examples{ np <- if(interactive()) 99 else 19 testpyramidal <- studpermu.test(pyramidal, Neurons ~ group, nperm=np) plot(testpyramidal) plot(testpyramidal, meanonly=TRUE) plot(testpyramidal, col.theo=8, lwd.theo=4, lty.theo=1) plot(testpyramidal, . ~ pi * r^2) op <- par(mfrow=c(1,3)) plot(testpyramidal, separately=TRUE) plot(testpyramidal, separately=TRUE, col=2, lty=1, lwd.mean=2, col.mean=4) par(op) } \keyword{hplot} \keyword{htest} \concept{Goodness-of-fit} spatstat.explore/man/density.ppp.Rd0000644000176200001440000004366614643125461017136 0ustar liggesusers\name{density.ppp} \alias{density.ppp} \title{Kernel Smoothed Intensity of Point Pattern} \description{ Compute a kernel smoothed intensity function from a point pattern. } \usage{ \method{density}{ppp}(x, sigma=NULL, \dots, weights=NULL, edge=TRUE, varcov=NULL, at="pixels", leaveoneout=TRUE, adjust=1, diggle=FALSE, se=FALSE, wtype=c("value", "multiplicity"), kernel="gaussian", scalekernel=is.character(kernel), positive=FALSE, verbose=TRUE, sameas) } \arguments{ \item{x}{ Point pattern (object of class \code{"ppp"}). } \item{sigma}{ The smoothing bandwidth (the amount of smoothing). The standard deviation of the isotropic smoothing kernel. Either a numerical value, or a function that computes an appropriate value of \code{sigma}. } \item{weights}{ Optional weights to be attached to the points. A numeric vector, numeric matrix, an \code{expression}, or a pixel image. } \item{\dots}{ Additional arguments passed to \code{\link[spatstat.geom]{pixellate.ppp}} and \code{\link[spatstat.geom]{as.mask}} to determine the pixel resolution, or passed to \code{sigma} if it is a function. } \item{edge}{ Logical value indicating whether to apply edge correction. } \item{varcov}{ Variance-covariance matrix of anisotropic smoothing kernel. Incompatible with \code{sigma}. } \item{at}{ String specifying whether to compute the intensity values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{x} (\code{at="points"}). } \item{leaveoneout}{ Logical value indicating whether to compute a leave-one-out estimator. Applicable only when \code{at="points"}. } \item{adjust}{ Optional. Adjustment factor for the smoothing parameter. } \item{diggle}{ Logical. 
If \code{TRUE}, use the Jones-Diggle improved edge correction, which is more accurate but slower to compute than the default correction. } \item{kernel}{ The smoothing kernel. A character string specifying the smoothing kernel (current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}), or a pixel image (object of class \code{"im"}) containing values of the kernel, or a \code{function(x,y)} which yields values of the kernel. } \item{scalekernel}{ Logical value. If \code{scalekernel=TRUE}, then the kernel will be rescaled to the bandwidth determined by \code{sigma} and \code{varcov}: this is the default behaviour when \code{kernel} is a character string. If \code{scalekernel=FALSE}, then \code{sigma} and \code{varcov} will be ignored: this is the default behaviour when \code{kernel} is a function or a pixel image. } \item{se}{ Logical value indicating whether to compute standard errors as well. } \item{wtype}{ Character string (partially matched) specifying how the weights should be interpreted for the calculation of standard error. See Details. } \item{positive}{ Logical value indicating whether to force all density values to be positive numbers. Default is \code{FALSE}. } \item{verbose}{ Logical value indicating whether to issue warnings about numerical problems and conditions. } \item{sameas}{ Optional. The result of a previous evaluation of \code{density.ppp}. Smoothing will be performed using the same kernel and bandwidth that were used to produce \code{sameas}. Namely the values of the arguments \code{kernel}, \code{sigma}, \code{varcov}, \code{scalekernel} and \code{adjust} will be overwritten by the values that were used to produce \code{sameas}. } } \value{ By default, the result is a pixel image (object of class \code{"im"}). Pixel values are estimated intensity values, expressed in \dQuote{points per unit area}. If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{x}. Values are estimated intensity values at the points of \code{x}. In either case, the return value has attributes \code{"sigma"} and \code{"varcov"} which report the smoothing bandwidth that was used. If \code{weights} is a matrix with more than one column, then the result is a list of images (if \code{at="pixels"}) or a matrix of numerical values (if \code{at="points"}). If \code{se=TRUE}, the result is a list with two elements named \code{estimate} and \code{SE}, each of the format described above. } \details{ This is a method for the generic function \code{density}. It computes a fixed-bandwidth kernel estimate (Diggle, 1985) of the intensity function of the point process that generated the point pattern \code{x}. The amount of smoothing is controlled by \code{sigma} if it is specified. By default, smoothing is performed using a Gaussian kernel. The resulting density estimate is the convolution of the isotropic Gaussian kernel, of standard deviation \code{sigma}, with point masses at each of the data points in \code{x}. Anisotropic kernels, and non-Gaussian kernels, are also supported. Each point has unit weight, unless the argument \code{weights} is given. If \code{edge=TRUE} (the default), the intensity estimate is corrected for edge effect bias. If \code{at="pixels"} (the default), the result is a pixel image giving the estimated intensity at each pixel in a grid. If \code{at="points"}, the result is a numeric vector giving the estimated intensity at each of the original data points in \code{x}. 
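  A minimal sketch contrasting the two forms of output described above (the bandwidth is
  chosen arbitrarily, for illustration only):
  \preformatted{
  require(spatstat.explore)
  D  <- density(cells, sigma=0.1)                # pixel image of estimated intensity
  Dp <- density(cells, sigma=0.1, at="points")   # estimated intensity at the data points only
  plot(D); points(cells)
  range(Dp)
  }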
} \section{Amount of smoothing}{ The amount of smoothing is determined by the arguments \code{sigma}, \code{varcov} and \code{adjust}. \itemize{ \item if \code{sigma} is a single numerical value, this is taken as the standard deviation of the isotropic Gaussian kernel. \item alternatively \code{sigma} may be a function that computes an appropriate bandwidth from the data point pattern by calling \code{sigma(x)}. To perform automatic bandwidth selection using cross-validation, it is recommended to use the functions \code{\link{bw.diggle}}, \code{\link{bw.CvL}}, \code{\link{bw.scott}} or \code{\link{bw.ppl}}. \item The smoothing kernel may be made anisotropic by giving the variance-covariance matrix \code{varcov}. The arguments \code{sigma} and \code{varcov} are incompatible. \item Alternatively \code{sigma} may be a vector of length 2 giving the standard deviations of the \eqn{x} and \eqn{y} coordinates, thus equivalent to \code{varcov = diag(rep(sigma^2, 2))}. \item if neither \code{sigma} nor \code{varcov} is specified, an isotropic Gaussian kernel will be used, with a default value of \code{sigma} calculated by a simple rule of thumb that depends only on the size of the window. \item The argument \code{adjust} makes it easy for the user to change the bandwidth specified by any of the rules above. The value of \code{sigma} will be multiplied by the factor \code{adjust}. The matrix \code{varcov} will be multiplied by \code{adjust^2}. To double the smoothing bandwidth, set \code{adjust=2}. \item An infinite bandwidth, \code{sigma=Inf} or \code{adjust=Inf}, is permitted, and yields an intensity estimate which is constant over the spatial domain. } } \section{Edge correction}{ If \code{edge=TRUE}, the intensity estimate is corrected for edge effect bias in one of two ways: \itemize{ \item If \code{diggle=FALSE} (the default) the intensity estimate is corrected by dividing it by the convolution of the Gaussian kernel with the window of observation. This is the approach originally described in Diggle (1985). Thus the intensity value at a point \eqn{u} is \deqn{ \hat\lambda(u) = e(u) \sum_i k(x_i - u) w_i }{ \lambda(u) = e(u) \sum[i] k(x[i] - u) w[i] } where \eqn{k} is the Gaussian smoothing kernel, \eqn{e(u)} is an edge correction factor, and \eqn{w_i}{w[i]} are the weights. \item If \code{diggle=TRUE} then the code uses the improved edge correction described by Jones (1993) and Diggle (2010, equation 18.9). This has been shown to have better performance (Jones, 1993) but is slightly slower to compute. The intensity value at a point \eqn{u} is \deqn{ \hat\lambda(u) = \sum_i k(x_i - u) w_i e(x_i) }{ \lambda(u) = \sum[i] k(x[i] - u) w[i] e(x[i]) } where again \eqn{k} is the Gaussian smoothing kernel, \eqn{e(x_i)}{e(x[i])} is an edge correction factor, and \eqn{w_i}{w[i]} are the weights. } In both cases, the edge correction term \eqn{e(u)} is the reciprocal of the kernel mass inside the window: \deqn{ \frac{1}{e(u)} = \int_W k(v-u) \, {\rm d}v }{ 1/e(u) = integral[v in W] k(v-u) dv } where \eqn{W} is the observation window. } \section{Smoothing kernel}{ By default, smoothing is performed using a Gaussian kernel. The choice of smoothing kernel is determined by the argument \code{kernel}.
This should be a character string giving the name of a recognised two-dimensional kernel (current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}), or a pixel image (object of class \code{"im"}) containing values of the kernel, or a \code{function(x,y)} which yields values of the kernel. The default is a Gaussian kernel. If \code{scalekernel=TRUE} then the kernel values will be rescaled according to the arguments \code{sigma}, \code{varcov} and \code{adjust} as explained above, effectively treating \code{kernel} as the template kernel with standard deviation equal to 1. This is the default behaviour when \code{kernel} is a character string. If \code{scalekernel=FALSE}, the kernel values will not be altered, and the arguments \code{sigma}, \code{varcov} and \code{adjust} are ignored. This is the default behaviour when \code{kernel} is a pixel image or a function. } \section{Desired output}{ If \code{at="pixels"} (the default), intensity values are computed at every location \eqn{u} in a fine grid, and are returned as a pixel image. The point pattern is first discretised using \code{\link[spatstat.geom]{pixellate.ppp}}, then the intensity is computed using the Fast Fourier Transform. Accuracy depends on the pixel resolution and the discretisation rule. The pixel resolution is controlled by the arguments \code{\dots} passed to \code{\link[spatstat.geom]{as.mask}} (specify the number of pixels by \code{dimyx} or the pixel size by \code{eps}). The discretisation rule is controlled by the arguments \code{\dots} passed to \code{\link[spatstat.geom]{pixellate.ppp}} (the default rule is that each point is allocated to the nearest pixel centre; this can be modified using the arguments \code{fractional} and \code{preserve}). If \code{at="points"}, the intensity values are computed to high accuracy at the points of \code{x} only. Computation is performed by directly evaluating and summing the kernel contributions without discretising the data. The result is a numeric vector giving the density values. The intensity value at a point \eqn{x_i}{x[i]} is (if \code{diggle=FALSE}) \deqn{ \hat\lambda(x_i) = e(x_i) \sum_j k(x_j - x_i) w_j }{ \lambda(x[i]) = e(x[i]) \sum[j] k(x[j] - x[i]) w[j] } or (if \code{diggle=TRUE}) \deqn{ \hat\lambda(x_i) = \sum_j k(x_j - x_i) w_j e(x_j) }{ \lambda(x[i]) = \sum[j] k(x[j] - x[i]) w[j] e(x[j]) } If \code{leaveoneout=TRUE} (the default), then the sum in the equation is taken over all \eqn{j} not equal to \eqn{i}, so that the intensity value at a data point is the sum of kernel contributions from all \emph{other} data points. If \code{leaveoneout=FALSE} then the sum is taken over all \eqn{j}, so that the intensity value at a data point includes a contribution from the same point. } \section{Weights}{ If \code{weights} is a matrix with more than one column, then the calculation is effectively repeated for each column of weights. The result is a list of images (if \code{at="pixels"}) or a matrix of numerical values (if \code{at="points"}). The argument \code{weights} can also be an \code{expression}. It will be evaluated in the data frame \code{as.data.frame(x)} to obtain a vector or matrix of weights. The expression may involve the symbols \code{x} and \code{y} representing the Cartesian coordinates, the symbol \code{marks} representing the mark values if there is only one column of marks, and the names of the columns of marks if there are several columns. 
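As an illustrative sketch only (assuming a marked point pattern such as the standard
\code{longleaf} dataset, whose marks are tree diameters, and an arbitrarily chosen
bandwidth), the weights could be supplied as an expression involving the marks,
following the interface described above:
\preformatted{
  ## weight each point by its mark value (tree diameter), for illustration
  D <- density(longleaf, sigma=10, weights=expression(marks))
  ## equivalently, using an explicit numeric vector of weights
  D2 <- density(longleaf, sigma=10, weights=marks(longleaf))
}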
The argument \code{weights} can also be a pixel image (object of class \code{"im"}). numerical weights for the data points will be extracted from this image (by looking up the pixel values at the locations of the data points in \code{x}). } \section{Standard error}{ If \code{se=TRUE}, the standard error of the estimate will also be calculated. The calculation assumes a Poisson point process. If \code{weights} are given, then the calculation of standard error depends on the interpretation of the weights. This is controlled by the argument \code{wtype}. \itemize{ \item If \code{wtype="value"} (the default), the weights are interpreted as numerical values observed at the data locations. Roughly speaking, standard errors are proportional to the absolute values of the weights. \item If \code{wtype="multiplicity"} the weights are interpreted as multiplicities so that a weight of 2 is equivalent to having a pair of duplicated points at the data location. Roughly speaking, standard errors are proportional to the square roots of the weights. Negative weights are not permitted. } The default rule is now \code{wtype="value"} but previous versions of \code{density.ppp} (in \pkg{spatstat.explore} versions \code{3.1-0} and earlier) effectively used \code{wtype="multiplicity"}. } \section{The meaning of \code{density.ppp}}{ This function is often misunderstood. The result of \code{density.ppp} is not a spatial smoothing of the marks or weights attached to the point pattern. To perform spatial interpolation of values that were observed at the points of a point pattern, use \code{\link{Smooth.ppp}}. The result of \code{density.ppp} is not a probability density. It is an estimate of the \emph{intensity function} of the point process that generated the point pattern data. Intensity is the expected number of random points per unit area. The units of intensity are \dQuote{points per unit area}. Intensity is usually a function of spatial location, and it is this function which is estimated by \code{density.ppp}. The integral of the intensity function over a spatial region gives the expected number of points falling in this region. Inspecting an estimate of the intensity function is usually the first step in exploring a spatial point pattern dataset. For more explanation, see Baddeley, Rubak and Turner (2015) or Diggle (2003, 2010). If you have two (or more) types of points, and you want a probability map or relative risk surface (the spatially-varying probability of a given type), use \code{\link{relrisk}}. } \section{Technical issue: Negative Values}{ Negative and zero values of the density estimate are possible when \code{at="pixels"} because of numerical errors in finite-precision arithmetic. By default, \code{density.ppp} does not try to repair such errors. This would take more computation time and is not always needed. (Also it would not be appropriate if \code{weights} include negative values.) To ensure that the resulting density values are always positive, set \code{positive=TRUE}. } \seealso{ To select the bandwidth \code{sigma} automatically by cross-validation, use \code{\link{bw.diggle}}, \code{\link{bw.CvL}}, \code{\link{bw.scott}} or \code{\link{bw.ppl}}. To perform spatial interpolation of values that were observed at the points of a point pattern, use \code{\link{Smooth.ppp}}. For adaptive nonparametric estimation, see \code{\link{adaptive.density}}. For data sharpening, see \code{\link{sharpen.ppp}}. 
To compute a relative risk surface or probability map for two (or more) types of points, use \code{\link{relrisk}}. For information about the data structures, see \code{\link[spatstat.geom]{ppp.object}}, \code{\link[spatstat.geom]{im.object}}. } \examples{ if(interactive()) { opa <- par(mfrow=c(1,2)) plot(density(cells, 0.05)) plot(density(cells, 0.05, diggle=TRUE)) par(opa) v <- diag(c(0.05, 0.07)^2) plot(density(cells, varcov=v)) } # automatic bandwidth selection plot(density(cells, sigma=bw.diggle(cells))) # equivalent: plot(density(cells, bw.diggle)) # evaluate intensity at points density(cells, 0.05, at="points") # non-Gaussian kernel plot(density(cells, sigma=0.4, kernel="epanechnikov")) if(interactive()) { # see effect of changing pixel resolution opa <- par(mfrow=c(1,2)) plot(density(cells, sigma=0.4)) plot(density(cells, sigma=0.4, eps=0.05)) par(opa) } # relative risk calculation by hand (see relrisk.ppp) lung <- split(chorley)$lung larynx <- split(chorley)$larynx D <- density(lung, sigma=2) plot(density(larynx, sigma=2, weights=1/D)) } \references{ \baddrubaturnbook Diggle, P.J. (1985) A kernel method for smoothing point process data. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C) \bold{34} (1985) 138--147. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Diggle, P.J. (2010) Nonparametric methods. Chapter 18, pp. 299--316 in A.E. Gelfand, P.J. Diggle, M. Fuentes and P. Guttorp (eds.) \emph{Handbook of Spatial Statistics}, CRC Press, Boca Raton, FL. Jones, M.C. (1993) Simple boundary corrections for kernel density estimation. \emph{Statistics and Computing} \bold{3}, 135--146. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.explore/man/spatialcdf.Rd0000644000176200001440000000673414643125462016767 0ustar liggesusers\name{spatialcdf} \alias{spatialcdf} \title{ Spatial Cumulative Distribution Function } \description{ Compute the spatial cumulative distribution function of a spatial covariate, optionally using spatially-varying weights. } \usage{ spatialcdf(Z, weights = NULL, normalise = FALSE, ..., W = NULL, Zname = NULL) } \arguments{ \item{Z}{ Spatial covariate. A pixel image or a \code{function(x,y,...)} } \item{weights}{ Spatial weighting for different locations. A pixel image, a \code{function(x,y,...)}, a window, a constant value, or a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}). } \item{normalise}{ Logical. Whether the weights should be normalised so that they sum to 1. } \item{\dots}{ Arguments passed to \code{\link[spatstat.geom]{as.mask}} to determine the pixel resolution, or extra arguments passed to \code{Z} if it is a function. } \item{W}{ Optional window (object of class \code{"owin"}) defining the spatial domain. } \item{Zname}{ Optional character string for the name of the covariate \code{Z} used in plots. } } \details{ If \code{weights} is missing or \code{NULL}, it defaults to 1. The values of the covariate \code{Z} are computed on a grid of pixels. The weighted cumulative distribution function of \code{Z} values is computed, taking each value with weight equal to the pixel area. The resulting function \eqn{F} is such that \eqn{F(t)} is the area of the region of space where \eqn{Z \le t}{Z <= t}. If \code{weights} is a pixel image or a function, then the values of \code{weights} and of the covariate \code{Z} are computed on a grid of pixels. The \code{weights} are multiplied by the pixel area. 
Then the weighted empirical cumulative distribution function of \code{Z} values is computed using \code{\link[spatstat.univar]{ewcdf}}. The resulting function \eqn{F} is such that \eqn{F(t)} is the total weight (or weighted area) of the region of space where \eqn{Z \le t}{Z <= t}. If \code{weights} is a fitted point process model, then it should be a Poisson process. The fitted intensity of the model, and the value of the covariate \code{Z}, are evaluated at the quadrature points used to fit the model. The \code{weights} are multiplied by the weights of the quadrature points. Then the weighted empirical cumulative distribution of \code{Z} values is computed using \code{\link[spatstat.univar]{ewcdf}}. The resulting function \eqn{F} is such that \eqn{F(t)} is the expected number of points in the point process that will fall in the region of space where \eqn{Z \le t}{Z <= t}. If \code{normalise=TRUE}, the function is normalised so that its maximum value equals 1, so that it gives the cumulative \emph{fraction} of weight or cumulative fraction of points. The result can be printed, plotted, and used as a function. } \value{ A cumulative distribution function object belonging to the classes \code{"spatialcdf"}, \code{"ewcdf"}, \code{"ecdf"} (only if \code{normalise=TRUE}) and \code{"stepfun"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.univar]{ewcdf}}, \code{\link{cdf.test}} } \examples{ with(bei.extra, { plot(spatialcdf(grad)) if(require("spatstat.model")) { fit <- ppm(bei ~ elev) plot(spatialcdf(grad, predict(fit))) A <- spatialcdf(grad, fit) A(0.1) } }) plot(spatialcdf("x", W=letterR)) } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Extract.fasp.Rd0000644000176200001440000000342614611073324017204 0ustar liggesusers\name{Extract.fasp} \alias{[.fasp} \title{Extract Subset of Function Array} \description{ Extract a subset of a function array (an object of class \code{"fasp"}). } \usage{ \method{[}{fasp}(x, I, J, drop=TRUE,\dots) } \arguments{ \item{x}{ A function array. An object of class \code{"fasp"}. } \item{I}{ any valid expression for a subset of the row indices of the array. } \item{J}{ any valid expression for a subset of the column indices of the array. } \item{drop}{ Logical. When the selected subset consists of only one cell of the array, if \code{drop=FALSE} the result is still returned as a \eqn{1 \times 1}{1 * 1} array of functions (class \code{"fasp"}) while if \code{drop=TRUE} it is returned as a function (class \code{"fv"}). } \item{\dots}{Ignored.} } \value{ A function array (of class \code{"fasp"}). Exceptionally, if the array has only one cell, and if \code{drop=TRUE}, then the result is a function value table (class \code{"fv"}). } \details{ A function array can be regarded as a matrix whose entries are functions. See \code{\link{fasp.object}} for an explanation of function arrays. This routine extracts a sub-array according to the usual conventions for matrix indexing. 
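For instance (a small sketch based on the \code{lansing} data used in the Examples
below), selecting a single cell of the array returns either a function or a
\eqn{1 \times 1}{1 * 1} array, depending on \code{drop}:
\preformatted{
  a <- alltypes(lansing, 'K')
  a["hickory", "maple"]               # a single function (class "fv")
  a["hickory", "maple", drop=FALSE]   # a 1 x 1 function array (class "fasp")
}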
} \seealso{ \code{\link{fasp.object}} } \examples{ online <- interactive() # Lansing woods data - multitype points with 6 types X <- lansing if(!online) { # subsample data (from 2251 to 450 points) to shorten check time X <- X[c(FALSE,FALSE,FALSE,FALSE,TRUE)] } a <- alltypes(X, 'K') # extract first three marks only b <- a[1:3,1:3] if(online) {plot(b)} # subset of array pertaining to hickories h <- a["hickory", ] if(online) {plot(h)} } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat.explore/man/Smoothfun.ppp.Rd0000644000176200001440000000372514643125462017432 0ustar liggesusers\name{Smoothfun.ppp} \alias{Smoothfun} \alias{Smoothfun.ppp} \title{ Smooth Interpolation of Marks as a Spatial Function } \description{ Perform spatial smoothing of numeric values observed at a set of irregular locations, and return the result as a function of spatial location. } \usage{ Smoothfun(X, \dots) \method{Smoothfun}{ppp}(X, sigma = NULL, \dots, weights = NULL, edge = TRUE, diggle = FALSE) } \arguments{ \item{X}{ Marked point pattern (object of class \code{"ppp"}). } \item{sigma}{ Smoothing bandwidth, or bandwidth selection function, passed to \code{\link{Smooth.ppp}}. } \item{\dots}{ Additional arguments passed to \code{\link{Smooth.ppp}}. } \item{weights}{ Optional vector of weights associated with the points of \code{X}. } \item{edge,diggle}{ Logical arguments controlling the edge correction. Arguments passed to \code{\link{Smooth.ppp}}. } } \details{ The commands \code{Smoothfun} and \code{\link{Smooth}} both perform kernel-smoothed spatial interpolation of numeric values observed at irregular spatial locations. The difference is that \code{\link{Smooth}} returns a pixel image, containing the interpolated values at a grid of locations, while \code{Smoothfun} returns a \code{function(x,y)} which can be used to compute the interpolated value at \emph{any} spatial location. For purposes such as model-fitting it is more accurate to use \code{Smoothfun} to interpolate data. } \value{ A \code{function} with arguments \code{x,y}. The function also belongs to the class \code{"Smoothfun"} which has methods for \code{print} and \code{\link[spatstat.geom]{as.im}}. It also belongs to the class \code{"funxy"} which has methods for \code{plot}, \code{contour} and \code{persp}. } \seealso{ \code{\link{Smooth}} } \examples{ f <- Smoothfun(longleaf) f f(120, 80) plot(f) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.explore/man/Window.quadrattest.Rd0000644000176200001440000000174314643125461020456 0ustar liggesusers\name{Window.quadrattest} \alias{Window.quadrattest} \title{Extract Window of Spatial Object} \description{ Given a spatial object (such as a point pattern or pixel image) in two dimensions, these functions extract the window in which the object is defined. } \usage{ \method{Window}{quadrattest}(X, \dots) } \arguments{ \item{X}{A spatial object.} \item{\dots}{Ignored.} } \value{ An object of class \code{"owin"} (see \code{\link[spatstat.geom]{owin.object}}) specifying an observation window. } \details{ These are methods for the generic function \code{\link[spatstat.geom]{Window}} which extract the spatial window in which the object \code{X} is defined. } \seealso{ \code{\link[spatstat.geom]{Window}}, \code{\link[spatstat.geom]{Window.ppp}}, \code{\link[spatstat.geom]{Window.psp}}. \code{\link[spatstat.geom]{owin.object}} } \examples{ A <- quadrat.test(cells, 4) Window(A) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{manip} spatstat.explore/man/transect.im.Rd0000644000176200001440000000551214643125462017075 0ustar liggesusers\name{transect.im} \alias{transect.im} \title{ Pixel Values Along a Transect } \description{ Extract the pixel values of a pixel image at each point along a linear transect. } \usage{ transect.im(X, ..., from="bottomleft", to="topright", nsample=512, click=FALSE, add=FALSE, curve=NULL) } \arguments{ \item{X}{ A pixel image (object of class \code{"im"}). } \item{\dots}{ Ignored. } \item{from,to}{ Optional. Start point and end point of the transect. Pairs of \eqn{(x,y)} coordinates in a format acceptable to \code{\link{xy.coords}}, or keywords \code{"bottom"}, \code{"left"}, \code{"top"}, \code{"right"}, \code{"bottomleft"} etc. } \item{nsample}{ Integer. Number of sample locations along the transect. } \item{click}{ Optional. Logical value. If \code{TRUE}, the linear transect is determined interactively by the user, who clicks two points on the current plot. } \item{add}{ Logical. If \code{click=TRUE}, this argument determines whether to perform interactive tasks on the current plot (\code{add=TRUE}) or to start by plotting \code{X} (\code{add=FALSE}). } \item{curve}{ Optional. A specification of a curved transect. See the section on Curved Transect. } } \details{ The pixel values of the image \code{X} along a line segment will be extracted. The result is a function table (\code{"fv"} object) which can be plotted directly. If \code{click=TRUE}, then the user is prompted to click two points on the plot of \code{X}. These endpoints define the transect. Otherwise, the transect is defined by the endpoints \code{from} and \code{to}. The default is a diagonal transect from bottom left to top right of the frame. } \section{Curved Transect}{ If \code{curve} is given, then the transect will be a curve. The argument \code{curve} should be a list with the following arguments: \describe{ \item{f}{A function in the \R language with one argument \code{t}.} \item{tlim}{A numeric vector of length 2 giving the range of values of the argument \code{t}.} \item{tname}{(Optional) a character string giving the symbolic name of the function argument \code{t}; defaults to \code{"t"}.} \item{tdescrip}{(Optional) a character string giving a short description of the function argument \code{t}; defaults to \code{"curve parameter"}.} } The function \code{f} must return a 2-column matrix or data frame specifying the spatial coordinates \code{(x,y)} of locations along the curve, determined by the values of the input argument \code{t}. } \value{ An object of class \code{"fv"} which can be plotted. } \author{ \adrian and \rolf } \seealso{ \code{\link[spatstat.geom]{im}} } \examples{ Z <- bei.extra$elev plot(transect.im(Z)) } \keyword{spatial} \keyword{manip} \keyword{iplot} spatstat.explore/man/Ginhom.Rd0000644000176200001440000001546214643125461016073 0ustar liggesusers\name{Ginhom} \alias{Ginhom} \title{ Inhomogeneous Nearest Neighbour Function } \description{ Estimates the inhomogeneous nearest neighbour function \eqn{G} of a non-stationary point pattern. } \usage{ Ginhom(X, lambda = NULL, lmin = NULL, ..., sigma = NULL, varcov = NULL, r = NULL, breaks = NULL, ratio = FALSE, update = TRUE, warn.bias=TRUE, savelambda=FALSE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{G} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link[spatstat.geom]{as.ppp}()} } \item{lambda}{ Optional. 
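As an illustrative sketch (assuming the \code{bei.extra$elev} image used in the
Examples, whose window is roughly 1000 by 500 metres), a sinusoidal transect could
be specified by:
\preformatted{
  Z <- bei.extra$elev
  S <- transect.im(Z,
                   curve=list(f=function(t) cbind(t, 250 + 200*sin(pi*t/1000)),
                              tlim=c(0, 1000)))
  plot(S)
}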
Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location.
} \item{lmin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value.
} \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing.
} \item{\dots}{ Extra arguments passed to \code{\link[spatstat.geom]{as.mask}} to control the pixel resolution, or passed to \code{\link{density.ppp}} to control the smoothing bandwidth.
} \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{G} function should be evaluated. Not normally given by the user; there is a sensible default.
} \item{breaks}{ This argument is for internal use only.
} \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns.
} \item{update}{ Logical. If \code{lambda} is a fitted model (class \code{"ppm"} or \code{"kppm"}) and \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link[spatstat.model]{update.ppm}} or \code{\link[spatstat.model]{update.kppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without fitting it to \code{X}.
} \item{warn.bias}{ Logical value specifying whether to issue a warning when the inhomogeneity correction factor takes extreme values, which can often lead to biased results. This usually occurs when insufficient smoothing is used to estimate the intensity.
} \item{savelambda}{ Logical value specifying whether to save the values of \code{lmin} and \code{lambda} as attributes of the result.
} } \details{ This command computes estimates of the inhomogeneous \eqn{G}-function (van Lieshout, 2010) of a point pattern. It is the counterpart, for inhomogeneous spatial point patterns, of the nearest-neighbour distance distribution function \eqn{G} for homogeneous point patterns computed by \code{\link{Gest}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). The inhomogeneous \eqn{G} function is computed using the border correction, equation (7) in Van Lieshout (2010). The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda} of the point process. It may be either
\describe{
\item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. }
\item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. }
\item{a fitted point process model}{ (object of class \code{"ppm"} or \code{"kppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) }
\item{a function}{ which can be evaluated to give values of the intensity at any locations. }
\item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother.
} } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments.
} \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}.
} \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. (2010) A J-function for inhomogeneous point processes. \emph{Statistica Neerlandica} \bold{65}, 183--201.
} \seealso{ \code{\link{Finhom}}, \code{\link{Jinhom}}, \code{\link{Gest}}
} \examples{ plot(Ginhom(swedishpines, sigma=10)) \donttest{ plot(Ginhom(swedishpines, sigma=bw.diggle, adjust=2)) }
} \author{ Original code by Marie-Colette van Lieshout. C implementation and R adaptation by \adrian and \ege.
} \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/as.function.rhohat.Rd0000644000176200001440000000676314611073323020363 0ustar liggesusers\name{as.function.rhohat} \alias{as.function.rhohat} \title{ Convert Function Table to Function } \description{ Converts an object of class \code{"rhohat"} to an \R language function. } \usage{ \method{as.function}{rhohat}(x, ..., value=".y", extrapolate=TRUE) } \arguments{ \item{x}{ Object of class \code{"rhohat"}, produced by the function \code{\link{rhohat}}. } \item{\dots}{ Ignored. } \item{value}{ Optional. Character string or character vector selecting one or more of the columns of \code{x} for use as the function value. See Details. } \item{extrapolate}{ Logical, indicating whether to extrapolate the function outside the domain of \code{x}. See Details. } } \details{ An object of class \code{"rhohat"} is essentially a data frame of estimated values of the function \eqn{rho(x)} as described in the help file for \code{\link{rhohat}}. Sometimes it is useful to convert the function value table to a \code{function} in the \R language. This is done by \code{as.function.rhohat}.
It converts an object \code{x} of class \code{"rhohat"} to an \R function \code{f}. The command \code{as.function.rhohat} is a method for the generic command \code{\link{as.function}} for the class \code{"rhohat"}. If \code{f <- as.function(x)} then \code{f} is an \R function that accepts a numeric argument and returns a corresponding value for the summary function by linear interpolation between the values in the table \code{x}. Argument values lying outside the range of the table yield an \code{NA} value (if \code{extrapolate=FALSE}) or the function value at the nearest endpoint of the range (if \code{extrapolate = TRUE}). To apply different rules to the left and right extremes, use \code{extrapolate=c(TRUE,FALSE)} and so on. Typically the table \code{x} contains several columns of function values corresponding to different edge corrections. Auxiliary information for the table identifies one of these columns as the \emph{recommended value}. By default, the values of the function \code{f <- as.function(x)} are taken from this column of recommended values. This default can be changed using the argument \code{value}, which can be a character string or character vector of names of columns of \code{x}. Alternatively \code{value} can be one of the abbreviations used by \code{\link{fvnames}}. If \code{value} specifies a single column of the table, then the result is a function \code{f(r)} with a single numeric argument \code{r} (with the same name as the original argument of the function table). If \code{value} specifies several columns of the table, then the result is a function \code{f(r,what)} where \code{r} is the numeric argument and \code{what} is a character string identifying the column of values to be used. The formal arguments of the resulting function are \code{f(r, what=value)}, which means that in a call to this function \code{f}, the permissible values of \code{what} are the entries of the original vector \code{value}; the default value of \code{what} is the first entry of \code{value}.
} \value{ A \code{function(r)} or \code{function(r,what)} where \code{r} is the name of the original argument of the function table.
} \author{ \adrian and \rolf
} \seealso{ \code{\link{rhohat}}, \code{\link{methods.rhohat}}, \code{\link{as.function.fv}}.
} \examples{ g <- rhohat(cells, "x") f <- as.function(g) f f(0.1)
} \keyword{spatial} \keyword{methods} spatstat.explore/man/densityfun.Rd0000644000176200001440000000663514643125461017044 0ustar liggesusers\name{densityfun.ppp} \alias{densityfun} \alias{densityfun.ppp} \title{ Kernel Estimate of Intensity as a Spatial Function } \description{ Compute a kernel estimate of intensity for a point pattern, and return the result as a function of spatial location. } \usage{ densityfun(X, \dots) \method{densityfun}{ppp}(X, sigma = NULL, \dots, weights = NULL, edge = TRUE, diggle = FALSE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{sigma}{ Smoothing bandwidth, or bandwidth selection function, passed to \code{\link{density.ppp}}. } \item{\dots}{ Additional arguments passed to \code{\link{density.ppp}}. } \item{weights}{ Optional vector of weights associated with the points of \code{X}. } \item{edge,diggle}{ Logical arguments controlling the edge correction. Arguments passed to \code{\link{density.ppp}}. } } \details{ The commands \code{densityfun} and \code{\link{density}} both perform kernel estimation of the intensity of a point pattern.
The difference is that \code{\link{density}} returns a pixel image, containing the estimated intensity values at a grid of locations, while \code{densityfun} returns a \code{function(x,y)} which can be used to compute the intensity estimate at \emph{any} spatial locations with coordinates \code{x,y}. For purposes such as model-fitting it is more accurate to use \code{densityfun}. } \section{Using the result of \code{densityfun}}{ If \code{f <- densityfun(X)}, where \code{X} is a two-dimensional point pattern, the resulting object \code{f} is a \code{function} in the \R language. By calling this function, the user can evaluate the estimated intensity at any desired spatial locations. Additionally \code{f} belongs to other classes which allow it to be printed and plotted easily. The function \code{f} has arguments \code{x,y,drop}. \itemize{ \item The arguments \code{x,y} of \code{f} specify the query locations. They can be numeric vectors of coordinates. Alternatively \code{x} can be a point pattern (or data acceptable to \code{\link[spatstat.geom]{as.ppp}}) and \code{y} is omitted. The result of \code{f(x,y)} is a numeric vector giving the values of the intensity. \item The argument \code{drop} of \code{f} specifies how to handle query locations which are outside the window of the original data. If \code{drop=TRUE} (the default), such locations are ignored. If \code{drop=FALSE}, a value of \code{NA} is returned for each such location. } Note that the smoothing parameters, such as the bandwidth \code{sigma}, are assigned when \code{densityfun} is executed. Smoothing parameters are fixed inside the function \code{f} and cannot be changed by arguments of \code{f}. } \value{ A \code{function} with arguments \code{x,y,drop}. The function also belongs to the class \code{"densityfun"} which has methods for \code{print} and \code{\link[spatstat.geom]{as.im}}. It also belongs to the class \code{"funxy"} which has methods for \code{plot}, \code{contour} and \code{persp}. } \seealso{ \code{\link{density}}. To interpolate values observed at the points, use \code{\link{Smoothfun}}. } \examples{ f <- densityfun(swedishpines) f f(42, 60) X <- runifpoint(2, Window(swedishpines)) f(X) plot(f) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.explore/man/bits.test.Rd0000644000176200001440000001201414611073323016551 0ustar liggesusers\name{bits.test} \alias{bits.test} \title{ Balanced Independent Two-Stage Monte Carlo Test } \description{ Performs a Balanced Independent Two-Stage Monte Carlo test of goodness-of-fit for spatial pattern. } \usage{ bits.test(X, \dots, exponent = 2, nsim=19, alternative=c("two.sided", "less", "greater"), leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose = TRUE) } \arguments{ \item{X}{ Either a point pattern dataset (object of class \code{"ppp"}, \code{"lpp"} or \code{"pp3"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"} or \code{"slrm"}). } \item{\dots}{ Arguments passed to \code{\link{dclf.test}} or \code{\link{mad.test}} or \code{\link{envelope}} to control the conduct of the test. Useful arguments include \code{fun} to determine the summary function, \code{rinterval} to determine the range of \eqn{r} values used in the test, and \code{use.theory} described under Details. } \item{exponent}{ Exponent used in the test statistic. Use \code{exponent=2} for the Diggle-Cressie-Loosmore-Ford test, and \code{exponent=Inf} for the Maximum Absolute Deviation test. 
} \item{nsim}{ Number of replicates in each stage of the test. A total of \code{nsim * (nsim + 1)} simulated point patterns will be generated, and the \eqn{p}-value will be a multiple of \code{1/(nsim+1)}. } \item{alternative}{ Character string specifying the alternative hypothesis. The default (\code{alternative="two.sided"}) is that the true value of the summary function is not equal to the theoretical value postulated under the null hypothesis. If \code{alternative="less"} the alternative hypothesis is that the true value of the summary function is lower than the theoretical value. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ Performs the Balanced Independent Two-Stage Monte Carlo test proposed by Baddeley et al (2017), an improvement of the Dao-Genton (2014) test. If \code{X} is a point pattern, the null hypothesis is CSR. If \code{X} is a fitted model, the null hypothesis is that model. The argument \code{use.theory} passed to \code{\link{envelope}} determines whether to compare the summary function for the data to its theoretical value for CSR (\code{use.theory=TRUE}) or to the sample mean of simulations from CSR (\code{use.theory=FALSE}). The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). } \value{ A hypothesis test (object of class \code{"htest"} which can be printed to show the outcome of the test. } \references{ Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2017) On two-stage Monte Carlo tests of composite hypotheses. \emph{Computational Statistics and Data Analysis} \bold{114}, 75--87. } \author{ Adrian Baddeley, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \spatstatAuthors. } \seealso{ Simulation envelopes: \code{\link{bits.envelope}}. Other tests: \code{\link{dg.test}}, \code{\link{dclf.test}}, \code{\link{mad.test}}. 
} \examples{ ns <- if(interactive()) 19 else 4 bits.test(cells, nsim=ns) bits.test(cells, alternative="less", nsim=ns) bits.test(cells, nsim=ns, interpolate=TRUE) } \keyword{spatial} \keyword{htest} \concept{Goodness-of-fit} spatstat.explore/man/spatstat.explore-internal.Rd0000644000176200001440000004021314705645676022012 0ustar liggesusers\name{spatstat.explore-internal} \title{Internal spatstat.explore functions} \alias{spatstat.explore-internal} %DoNotExport \alias{[.localpcfmatrix} \alias{[.rat} \alias{adjust.ratfv} \alias{ang2rad} \alias{as.data.frame.bw.optim} \alias{as.data.frame.fv} \alias{assemble.plot.objects} \alias{bandwidth.is.infinite} \alias{BartCalc} \alias{bermantestCalc} \alias{bermantestEngine} \alias{bind.ratfv} \alias{bw.optim} \alias{calc.DR} \alias{calc.NNIR} \alias{calc.SAVE} \alias{calc.SIR} \alias{calc.TSE} \alias{censtimeCDFest} \alias{check.testfun} \alias{circticks} \alias{clarkevansCalc} \alias{compatible.rat} \alias{conform.ratfv} \alias{CressieReadStatistic} \alias{CressieReadSymbol} \alias{CressieReadName} \alias{cutoff2Dkernel} \alias{CVforPCF} \alias{Deviation} \alias{densitycrossEngine} \alias{densitypointsEngine} \alias{digestCovariates} \alias{digital.volume} \alias{dim.fasp} \alias{dimnames.fasp} \alias{dimnames<-.fasp} \alias{distributecbind} \alias{ensure.listarg} \alias{envelopeEngine} \alias{envelopeProgressData} \alias{envelopeTest} \alias{envelope.hasenvelope} \alias{envelope.matrix} \alias{evaluateCovariate} \alias{evaluateCovariateAtPoints} \alias{evaluateCovariateAtPixels} \alias{evaluate2Dkernel} \alias{ExpSmoothLog} \alias{exactppm} \alias{extractAtomicQtests} \alias{fasp} \alias{f3engine} \alias{f3Cengine} \alias{findbestlegendpos} \alias{findcbind} \alias{flatfname} \alias{FormatFaspFormulae} \alias{fvexprmap} \alias{fvlabels} \alias{fvlabels<-} \alias{fvlabelmap} \alias{fvlegend} \alias{g3engine} \alias{g3Cengine} \alias{getSumFun} \alias{good.correction.K} \alias{hasenvelope} \alias{implemented.for.K} \alias{is.atomicQtest} \alias{is.poisson.exactppm} \alias{is.scov} \alias{is.stationary.exactppm} \alias{k3engine} \alias{Kborder.engine} \alias{Knone.engine} \alias{Krect.engine} \alias{Kount} \alias{Kwtsum} \alias{localKengine} \alias{localKmultiEngine} \alias{localpcfengine} \alias{localpcfmatrix} \alias{lookup2DkernelInfo} \alias{makefvlabel} \alias{maskLaslett} \alias{match2DkernelName} \alias{mctestSigtraceEngine} \alias{meanlistfv} \alias{names<-.fv} \alias{nncleanEngine} \alias{pairs.listof} \alias{pairs.solist} \alias{pcf3engine} \alias{pcfmulti.inhom} \alias{plot.bw.frac} \alias{plot.bw.optim} \alias{plot.localpcfmatrix} \alias{plot.plotpairsim} \alias{plot.spatialcdf} \alias{polyLaslett} \alias{predict.exactppm} \alias{prefixfv} \alias{print.bw.frac} \alias{print.bw.optim} \alias{print.densityfun} \alias{print.envelope} \alias{print.exactppm} \alias{print.fasp} \alias{print.fv} \alias{print.fvfun} \alias{print.hasenvelope} \alias{print.laslett} \alias{print.localpcfmatrix} \alias{print.plotpairsim} \alias{print.quadrattest} \alias{print.rat} \alias{print.Smoothfun} \alias{print.summary.bw.optim} \alias{print.summary.ssf} \alias{quadrat.testEngine} \alias{ratfv} \alias{rebadge.as.crossfun} \alias{rebadge.as.dotfun} \alias{rebadge.fv} \alias{rebadgeLabels} \alias{reconcile.fv} \alias{RelevantDeviation} \alias{rename.fv} \alias{resolve.2D.kernel} \alias{resolve.foxall.window} \alias{resolve.lambda} \alias{resolve.lambda.ppp} \alias{resolve.lambdacross} \alias{resolve.lambdacross.ppp} \alias{resolve.reciplambda} 
\alias{resolve.reciplambda.ppp} \alias{resolveEinfo} \alias{rhohatEngine} \alias{rhohatCalc} \alias{rmax.Rigid} \alias{rmax.rule} \alias{rocData} \alias{rocModel} \alias{roseContinuous} \alias{scanmeasure} \alias{scanmeasure.ppp} \alias{scanmeasure.im} \alias{scanBinomLRTS} \alias{scanPoisLRTS} \alias{second.moment.calc} \alias{second.moment.engine} \alias{sewpcf} \alias{sewsmod} \alias{shift.quadrattest} \alias{simulrecipe} \alias{StieltjesCalc.fv} \alias{Smooth.solist} \alias{smoothcrossEngine} \alias{smoothpointsEngine} \alias{spatialCDFframe} \alias{spatialCDFtest} \alias{spatialCDFtestCalc} \alias{spatialCovariateEvidence} \alias{spatialCovariateEvidence.exactppm} \alias{spatialCovariateEvidence.ppp} \alias{sphere.volume} \alias{summary.envelope} \alias{summary.bw.optim} \alias{tweak.fv.entry} \alias{tweak.ratfv.entry} \alias{twostage.test} \alias{twostage.envelope} \alias{updateData} \alias{updateData.default} \alias{validate2Dkernel} \alias{validate.angles} \alias{validate.weights} \alias{vanilla.fv} \alias{weightedclosepairs} \alias{X2testEngine} %%%%%%% \description{ Internal spatstat.explore functions. } \usage{ \method{[}{localpcfmatrix}(x, i, \dots) \method{[}{rat}(x, \dots) adjust.ratfv(f, columns, numfactor, denfactor) ang2rad(ang, unit, start, clockwise) \method{as.data.frame}{bw.optim}(x, \dots) \method{as.data.frame}{fv}(x, \dots) assemble.plot.objects(xlim, ylim, \dots, lines, polygon) bandwidth.is.infinite(sigma) BartCalc(fY, fK) bermantestCalc(fram, which, alternative, \dots) bermantestEngine(model, covariate, which, alternative, \dots, modelname, covname, dataname) bind.ratfv(x, numerator, denominator, labl, desc, preferred, ratio, quotient) bw.optim(cv, h, iopt, \dots, cvname, hname, criterion, optimum, warnextreme, hargnames, yexp, unitname, template, exponent, hword) calc.DR(COV, z, Dim) calc.NNIR(COV, z, pos, Dim) calc.SAVE(COV, z, Dim) calc.SIR(COV, z) calc.TSE(COV, z, pos, Dim1, Dim2) censtimeCDFest(o, cc, d, breaks, \dots, KM, RS, HAN, RAW, han.denom, tt, pmax, fname, fexpr) check.testfun(f, f1, X) circticks(R, at, unit, start, clockwise, labels) clarkevansCalc(X, correction, clipregion, working) \method{compatible}{rat}(A, B, \dots) conform.ratfv(x) CressieReadStatistic(OBS,EXP,lambda,normalise,named) CressieReadSymbol(lambda) CressieReadName(lambda) cutoff2Dkernel(kernel, sigma, varcov, \dots, scalekernel, cutoff, fatal) CVforPCF(bw, stuff) Deviation(x, ref, leaveout, n, xi) densitycrossEngine(Xdata, Xquery, sigma, \dots, kernel, scalekernel, weights, edge, varcov, diggle, sorted, cutoff, se, kerpow) densitypointsEngine(x, sigma, \dots, kernel, scalekernel, kerpow, weights, edge, varcov, leaveoneout, diggle, sorted, spill, cutoff, debug) digestCovariates(\dots, W) digital.volume(range, nval, vside) \method{dim}{fasp}(x) \method{dimnames}{fasp}(x) \method{dimnames}{fasp}(x) <- value distributecbind(x) ensure.listarg(x, n, singletypes, xtitle, things) envelopeEngine(X, fun, simul, nsim, nrank, \dots, funargs, funYargs, verbose, clipdata, transform, global, ginterval, use.theory, alternative, scale, clamp, savefuns, savepatterns, saveresultof, weights, nsim2, VARIANCE, nSD, Yname, maxnerr, rejectNA, silent, maxerr.action, internal, cl, envir.user, expected.arg, do.pwrong, foreignclass, collectrubbish) envelopeProgressData(X, fun, \dots, exponent, alternative, leaveout, scale, clamp, normalize, deflate, rmin, save.envelope, savefuns, savepatterns) envelopeTest(X, \dots, exponent, alternative, rinterval, leaveout, scale, clamp, tie.rule, interpolate, 
save.interpolant, save.envelope, savefuns, savepatterns, Xname, badXfatal, verbose) \method{envelope}{hasenvelope}(Y, \dots, Yname) \method{envelope}{matrix}(Y, \dots, argvals, rvals, observed, theory, funX, nsim, nsim2, jsim, jsim.mean, type, alternative, scale, clamp, csr, use.theory, nrank, ginterval, nSD, savefuns, check, Yname, argname, arg.desc, do.pwrong, weights, precomputed, gaveup) evaluateCovariate(covariate, locations, \dots) evaluateCovariateAtPixels(covariate, locations, \dots, types, eps, dimyx, rule.eps) evaluateCovariateAtPoints(covariate, locations, \dots, allow.column) evaluate2Dkernel(kernel, x, y, sigma, varcov, \dots, scalekernel) ExpSmoothLog(X, \dots, at, weights, se) exactppm(X, baseline, \dots, subset, eps, dimyx, rule.eps) extractAtomicQtests(x) fasp(fns, which, formulae, dataname, title, rowNames, colNames, checkfv) f3engine(x, y, z, box, vside, range, nval, correction) f3Cengine(x, y, z, box, vside, rmax, nrval) findbestlegendpos(\dots) findcbind(root, depth, maxdepth) flatfname(x) FormatFaspFormulae(f, argname) fvexprmap(x) fvlabels(x, expand=FALSE) fvlabels(x) <- value fvlabelmap(x, dot=TRUE) fvlegend(object, elang) g3engine(x, y, z, box, rmax, nrval, correction) g3Cengine(x, y, z, box, rmax, nrval) getSumFun(abbreviation, classname, ismarked, fatal) good.correction.K(X) hasenvelope(X, E) implemented.for.K(correction, windowtype, explicit) is.atomicQtest(x) \method{is.poisson}{exactppm}(x) is.scov(x) \method{is.stationary}{exactppm}(x) k3engine(x, y, z, box, rmax, nrval, correction) Kborder.engine(X, rmax, nr, correction, weights, ratio) Knone.engine(X, rmax, nr, weights, ratio) Krect.engine(X, rmax, nr, correction, weights, ratio, fname, use.integers) Kount(dIJ, bI, b, breaks) Kwtsum(dIJ, bI, wIJ, b, w, breaks, fatal) localKengine(X, \dots, wantL, lambda, rmax, correction, verbose, rvalue) localKmultiEngine(X, from, to, lambdaFrom, lambdaTo, \dots, rmax, wantL, correction, verbose, rvalue, sigma, varcov, lambdaX, update, leaveoneout, Iexplain, Jexplain, Ikey, Jkey) localpcfengine(X, \dots, delta, rmax, nr, stoyan, lambda, rvalue) localpcfmatrix(X, i, \dots, lambda, delta, rmax, nr, stoyan) lookup2DkernelInfo(kernel) makefvlabel(op, accent, fname, sub, argname) maskLaslett(X, \dots, eps, dimyx, xy, rule.eps, oldX, verbose, plotit) match2DkernelName(kernel) mctestSigtraceEngine(R, devdata, devsim, \dots, interpolate, confint, alpha, exponent, unitname) meanlistfv(z, \dots) \method{names}{fv}(x) <- value nncleanEngine(kthNND, k, d, \dots, tol, maxit, plothist, lineargs, verbose, Xname) \method{pairs}{listof}(\dots, plot=TRUE) \method{pairs}{solist}(\dots, plot=TRUE) pcf3engine(x, y, z, box, rmax, nrval, correction, delta) pcfmulti.inhom(X, I, J, lambdaI, lambdaJ, \dots, lambdaX, r, breaks, kernel, bw, adjust.bw, stoyan, correction, sigma, adjust.sigma, varcov, update, leaveoneout, Iname, Jname) \method{plot}{bw.frac}(x, \dots) \method{plot}{bw.optim}(x, \dots, showopt, optargs) \method{plot}{localpcfmatrix}(x, \dots) \method{plot}{plotpairsim}(x, \dots) \method{plot}{spatialcdf}(x, \dots, xlab, ylab, do.points) polyLaslett(X, \dots, oldX, verbose, plotit) \method{predict}{exactppm}(object, locations, \dots, eps, dimyx, rule.eps) prefixfv(x, tagprefix, descprefix, lablprefix, whichtags) \method{print}{bw.frac}(x, \dots) \method{print}{bw.optim}(x, \dots) \method{print}{densityfun}(x, \dots) \method{print}{envelope}(x, \dots) \method{print}{exactppm}(x, \dots) \method{print}{fasp}(x, \dots) \method{print}{fv}(x, \dots, tight) \method{print}{fvfun}(x, \dots) 
\method{print}{hasenvelope}(x, \dots) \method{print}{laslett}(x, \dots) \method{print}{localpcfmatrix}(x, \dots) \method{print}{plotpairsim}(x, \dots) \method{print}{quadrattest}(x, \dots) \method{print}{rat}(x, \dots) \method{print}{Smoothfun}(x, \dots) \method{print}{summary.bw.optim}(x, \dots, digits) \method{print}{summary.ssf}(x, \dots) quadrat.testEngine(X, nx, ny, alternative, method, conditional, CR, \dots, nsim, Xcount, xbreaks, ybreaks, tess, fit, df.est, Xname, fitname) ratfv(df, numer, denom, \dots, ratio) rebadge.as.crossfun(x, main, sub, i, j) rebadge.as.dotfun(x, main, sub, i) rebadge.fv(x, new.ylab, new.fname, tags, new.desc, new.labl, new.yexp, new.dotnames, new.preferred, new.formula, new.tags) rebadgeLabels(x, new.fname) reconcile.fv(\dots) RelevantDeviation(x, alternative, clamp, scaling) rename.fv(x, fname, ylab, yexp) resolve.2D.kernel(\dots, sigma, varcov, x, mindist, adjust, bwfun, allow.zero) resolve.foxall.window(X, Y, W, warn.trim) resolve.lambda(X, lambda, \dots) \method{resolve.lambda}{ppp}(X, lambda, \dots, sigma, varcov, leaveoneout, update, check) resolve.lambdacross(X, I, J, lambdaI, lambdaJ, \dots) \method{resolve.lambdacross}{ppp}(X, I, J, lambdaI, lambdaJ, \dots, lambdaX, sigma, varcov, leaveoneout, update, lambdaIJ, Iexplain, Jexplain) resolve.reciplambda(X, lambda, reciplambda, \dots) \method{resolve.reciplambda}{ppp}(X, lambda, reciplambda, \dots, sigma, varcov, leaveoneout, update, check) resolveEinfo(x, what, fallback, warn, atomic) rhohatEngine(model, covariate, reference, volume, \dots, subset, weights, method, horvitz, smoother, resolution, spatCovarArgs, n, bw, adjust, from, to, bwref, covname, covunits, confidence, breaks, modelcall, callstring) rhohatCalc(ZX, Zvalues, lambda, denom, \dots, weights, lambdaX, method, horvitz, smoother, do.CI, n, bw, adjust, from, to, bwref, covname, confidence, breaks, positiveCI, markovCI, covunits, modelcall, callstring, savestuff) rmax.Rigid(X, g) rmax.rule(fun, W, lambda) rocData(covariate, nullmodel, \dots, high, p) rocModel(lambda, nullmodel, \dots, high, p) roseContinuous(ang, rad, unit, \dots, start, clockwise, main, labels, at, do.plot) scanmeasure(X, \dots) \method{scanmeasure}{ppp}(X, r, \dots, method) \method{scanmeasure}{im}(X, r, \dots) scanPoisLRTS(nZ, nG, muZ, muG, alternative) scanBinomLRTS(nZ, nG, muZ, muG, alternative) second.moment.calc(x, sigma, edge, what, \dots, varcov, expand, obswin, npts, debug) second.moment.engine(x, sigma, edge, what, \dots, kernel, scalekernel, kerpow, obswin, varcov, npts, debug, fastgauss) sewpcf(d, w, denargs, lambda2area, divisor) sewsmod(d, ff, wt, Ef, rvals, method="smrep", \dots, nwtsteps=500) \method{shift}{quadrattest}(X, \dots) simulrecipe(type, expr, envir, csr, pois, constraints) \method{StieltjesCalc}{fv}(M, f, \dots) \method{Smooth}{solist}(X, \dots) smoothcrossEngine(Xdata, Xquery, values, sigma, \dots, weights, varcov, kernel, scalekernel, sorted, cutoff) smoothpointsEngine(x, values, sigma, \dots, kernel, scalekernel, weights, varcov, leaveoneout, sorted, cutoff, debug) spatialCDFframe(model, covariate, \dots, jitter, covariateAtPoints, make.quantile.function) spatialCDFtest(model, covariate, test, \dots, dimyx, eps, rule.eps, interpolate, jitter, nsim, verbose, modelname, covname, dataname) spatialCDFtestCalc(fra, test, \dots, details) spatialCovariateEvidence(model, covariate, \dots) \method{spatialCovariateEvidence}{exactppm}(model, covariate, \dots, lambdatype, dimyx, eps, rule.eps, interpolate, jitter, jitterfactor, modelname, covname, 
dataname, subset, clip.predict) \method{spatialCovariateEvidence}{ppp}(model, covariate, \dots, lambdatype, dimyx, eps, rule.eps, interpolate, jitter, jitterfactor, modelname, covname, dataname, subset, clip.predict) sphere.volume(range, nval = 10) \method{summary}{bw.optim}(object, \dots) \method{summary}{envelope}(object,\dots) tweak.fv.entry(x, current.tag, new.labl, new.desc, new.tag) tweak.ratfv.entry(x, \dots) twostage.test(X, \dots, exponent, nsim, nsimsub, alternative, reuse, leaveout, interpolate, savefuns, savepatterns, verbose, badXfatal, testblurb) twostage.envelope(X, \dots, nsim, nsimsub, nrank, alternative, reuse, leaveout, interpolate, savefuns, savepatterns, verbose, badXfatal, testlabel) updateData(model, X, \dots) \method{updateData}{default}(model, X, \dots, warn) validate2Dkernel(kernel, fatal) validate.angles(angles, unit, guess) validate.weights(x, recip, how, allowzero, allowinf) vanilla.fv(x) weightedclosepairs(X, r, correction, what) X2testEngine(OBS, EXP, \dots, method, CR, df, nsim, conditional, alternative, testname, dataname) } \details{ These internal \pkg{spatstat.explore} functions should not be called directly by the user. Their names and capabilities may change without warning from one version of \pkg{spatstat.explore} to the next. } \value{ The return values of these functions are not documented, and may change without warning. } \keyword{internal} spatstat.explore/man/SmoothHeat.ppp.Rd0000644000176200001440000000342414700374645017522 0ustar liggesusers\name{SmoothHeat.ppp} \alias{SmoothHeat.ppp} \title{Spatial Smoothing of Observations using Diffusion Estimate of Density} \description{ Performs spatial smoothing of numeric values observed at a set of irregular locations, using the diffusion estimate of the density. } \usage{ \method{SmoothHeat}{ppp}(X, sigma, \dots, weights=NULL) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}) with numeric marks. } \item{sigma}{ Smoothing bandwidth. A single number giving the equivalent standard deviation of the smoother. } \item{\dots}{ Arguments passed to \code{\link[spatstat.explore]{densityHeat}} controlling the estimation of each marginal intensity, or passed to \code{\link[spatstat.geom]{pixellate.ppp}} controlling the pixel resolution. } \item{weights}{Optional numeric vector of weights associated with each data point. } } \details{ This is the analogue of the Nadaraya-Watson smoother, using the diffusion smoothing estimation procedure (Baddeley et al, 2022). The numerator and denominator of the Nadaraya-Watson smoother are calculated using \code{\link[spatstat.explore]{densityHeat.ppp}}. } \value{ Pixel image (object of class \code{"im"}) giving the smoothed mark value. } \seealso{ \code{\link[spatstat.explore]{Smooth.ppp}} for the usual kernel-based smoother (the Nadaraya-Watson smoother) and \code{\link[spatstat.explore]{densityHeat}} for the diffusion estimate of density. } \author{ \adrian, \tilman and Suman Rakshit. } \examples{ plot(SmoothHeat(longleaf, 10)) } \references{ Baddeley, A., Davies, T., Rakshit, S., Nair, G. and McSwiggan, G. (2022) Diffusion smoothing for spatial point patterns. \emph{Statistical Science} \bold{37}, 123--142. 
} \keyword{spatial} \keyword{smooth} spatstat.explore/man/pcfdot.Rd0000644000176200001440000001430314611073324016115 0ustar liggesusers\name{pcfdot} \alias{pcfdot} \title{Multitype pair correlation function (i-to-any)} \description{ Calculates an estimate of the multitype pair correlation function (from points of type \code{i} to points of any type) for a multitype point pattern. } \usage{ pcfdot(X, i, \dots, r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("isotropic", "Ripley", "translate"), divisor = c("r", "d"), ratio=FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the dot-type pair correlation function \eqn{g_{i\bullet}(r)}{gdot[i](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{\dots}{ Ignored. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{stoyan}{ Coefficient for default bandwidth rule; see Details. } \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See Details. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \details{ This is a generalisation of the pair correlation function \code{\link{pcf}} to multitype point patterns. For two locations \eqn{x} and \eqn{y} separated by a nonzero distance \eqn{r}, the probability \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x} and a point of any type at location \eqn{y} is \deqn{ p(r) = \lambda_i \lambda g_{i\bullet}(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i] * lambda * gdot[i](r) dx dy } where \eqn{\lambda}{lambda} is the intensity of all points, and \eqn{\lambda_i}{lambda[i]} is the intensity of the points of type \eqn{i}. For a completely random Poisson marked point process, \eqn{p(r) = \lambda_i \lambda}{p(r) = lambda[i] * lambda} so \eqn{g_{i\bullet}(r) = 1}{gdot[i](r) = 1}. For a stationary multitype point process, the type-\code{i}-to-any-type pair correlation function between marks \eqn{i} and \eqn{j} is formally defined as \deqn{ g_{i\bullet}(r) = \frac{K_{i\bullet}^\prime(r)}{2\pi r} }{ g(r) = Kdot[i]'(r)/ ( 2 * pi * r) } where \eqn{K_{i\bullet}^\prime}{Kdot[i]'(r)} is the derivative of the type-\code{i}-to-any-type \eqn{K} function \eqn{K_{i\bullet}(r)}{Kdot[i](r)}. of the point process. See \code{\link{Kdot}} for information about \eqn{K_{i\bullet}(r)}{Kdot[i](r)}. The command \code{pcfdot} computes a kernel estimate of the multitype pair correlation function from points of type \eqn{i} to points of any type. \itemize{ \item If \code{divisor="r"} (the default), then the multitype counterpart of the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. 
\item If \code{divisor="d"} then a modified estimator is used: the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}. This usually improves the bias of the estimator when \eqn{r} is close to zero. } There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \code{correction="translate"} is the Ohser-Stoyan translation correction, and \code{correction="isotropic"} or \code{"Ripley"} is Ripley's isotropic correction. The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density}}. The default is the Epanechnikov kernel. The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Its precise interpretation is explained in the documentation for \code{\link{density.default}}. For the Epanechnikov kernel with support \eqn{[-h,h]}, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. If \code{bw} is not specified, the default bandwidth is determined by Stoyan's rule of thumb (Stoyan and Stoyan, 1994, page 285). That is, \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the unmarked point process, and \eqn{c} is a constant in the range from 0.1 to 0.2. The argument \code{stoyan} determines the value of \eqn{c}. The companion function \code{\link{pcfcross}} computes the corresponding analogue of \code{\link{Kcross}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{g_{i\bullet}}{gdot[i]} has been estimated } \item{theo}{the theoretical value \eqn{g_{i\bullet}(r) = 1}{gdot[i](r) = 1} for independent marks. } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{g_{i\bullet}}{gdot[i]} obtained by the edge corrections named. } \seealso{ Mark connection function \code{\link{markconnect}}. Multitype pair correlation \code{\link{pcfcross}}, \code{\link{pcfmulti}}. Pair correlation \code{\link{pcf}}, \code{\link{pcf.ppp}}. \code{\link{Kdot}} } \examples{ p <- pcfdot(amacrine, "on") p <- pcfdot(amacrine, "on", stoyan=0.1) plot(p) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Gcross.inhom.Rd0000644000176200001440000001514614611073322017213 0ustar liggesusers\name{Gcross.inhom} \alias{Gcross.inhom} \title{ Inhomogeneous Multitype G Cross Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the cross \eqn{G} function, which is the distribution of the distance from a point of type \eqn{i} to the nearest point of type \eqn{j}, adjusted for spatially varying intensity. } \usage{ Gcross.inhom(X, i, j, lambda = NULL, lambdaI = NULL, lambdaJ = NULL, lambdamin = NULL, \dots, r = NULL, ReferenceMeasureMarkSetI = NULL, ratio = FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{G} function \eqn{G_{ij}(r)}{Gij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details.
} \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{lambda}{ Optional. Values of the estimated intensity of the point process. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process of points of type \code{i}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{i} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. Values of the the estimated intensity of the sub-process of points of type \code{j}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{j} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdamin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{\dots}{ Extra arguments passed to \code{\link[spatstat.geom]{as.mask}} to control the pixel resolution for the computation. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{G} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{ReferenceMeasureMarkSetI}{ Optional. The total measure of the mark set. A positive number. } \item{ratio}{ Logical value indicating whether to save ratio information. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}) containing estimates of the inhomogeneous cross type \eqn{G} function. } \details{ This is a generalisation of the function \code{\link{Gcross}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Ginhom}}. The argument \code{lambdaI} supplies the values of the intensity of the sub-process of points of type \code{i}. It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the type \code{i} intensity at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the type \code{i} intensity evaluated only at the data points of type \code{i}. The length of this vector must equal the number of type \code{i} points in \code{X}. } \item{a function}{ of the form \code{function(x,y)} which can be evaluated to give values of the intensity at any locations. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. 
(If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. Similarly the argument \code{lambdaJ} should contain estimated values of the intensity of the points of type \eqn{j}. It may be either a pixel image, a numeric vector of length equal to the number of points in \code{X}, a function, or omitted. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{ij}(r)}{Gij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Cronie, O. and Van Lieshout, M.N.M. (2015) Summary statistics for inhomogeneous marked point processes. \emph{Annals of the Institute of Statistical Mathematics} DOI: 10.1007/s10463-015-0515-z } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Gcross}}, \code{\link{Ginhom}}, \code{\link{Gcross.inhom}}, \code{\link{Gmulti.inhom}}. } \examples{ X <- rescale(amacrine) if(interactive() && require(spatstat.model)) { ## how to do it normally mod <- ppm(X ~ marks * x) lam <- fitted(mod, dataonly=TRUE) lmin <- min(predict(mod)[["off"]]) * 0.9 } else { ## for package testing lam <- intensity(X)[as.integer(marks(X))] lmin <- intensity(X)[2] * 0.9 } GC <- Gcross.inhom(X, "on", "off", lambda=lam, lambdamin=lmin) } \author{ \adrian. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/edge.Trans.Rd0000644000176200001440000001134114643125461016634 0ustar liggesusers\name{edge.Trans} \alias{edge.Trans} \alias{rmax.Trans} \title{ Translation Edge Correction } \description{ Computes Ohser and Stoyan's translation edge correction weights for a point pattern. } \usage{ edge.Trans(X, Y = X, W = Window(X), exact = FALSE, paired = FALSE, ..., trim = spatstat.options("maxedgewt"), dx=NULL, dy=NULL, give.rmax=FALSE, gW=NULL) rmax.Trans(W, g=setcov(W)) } \arguments{ \item{X,Y}{ Point patterns (objects of class \code{"ppp"}). } \item{W}{ Window for which the edge correction is required. } \item{exact}{ Logical. If \code{TRUE}, a slow algorithm will be used to compute the exact value. If \code{FALSE}, a fast algorithm will be used to compute the approximate value. } \item{paired}{ Logical value indicating whether \code{X} and \code{Y} are paired. If \code{TRUE}, compute the edge correction for corresponding points \code{X[i], Y[i]} for all \code{i}. If \code{FALSE}, compute the edge correction for each possible pair of points \code{X[i], Y[j]} for all \code{i} and \code{j}. } \item{\dots}{Ignored.} \item{trim}{ Maximum permitted value of the edge correction weight. } \item{dx,dy}{ Alternative data giving the \eqn{x} and \eqn{y} coordinates of the vector differences between the points. Incompatible with \code{X} and \code{Y}. See Details. } \item{give.rmax}{ Logical. If \code{TRUE}, also compute the value of \code{rmax.Trans(W)} and return it as an attribute of the result. } \item{g, gW}{ Optional. Set covariance of \code{W}, if it has already been computed. Not required if \code{W} is a rectangle. 
} } \details{ The function \code{edge.Trans} computes Ohser and Stoyan's translation edge correction weight, which is used in estimating the \eqn{K} function and in many other contexts. The function \code{rmax.Trans} computes the maximum value of distance \eqn{r} for which the translation edge correction estimate of \eqn{K(r)} is valid. For a pair of points \eqn{x} and \eqn{y} in a window \eqn{W}, the translation edge correction weight is \deqn{ e(u, r) = \frac{\mbox{area}(W)}{\mbox{area}(W \cap (W + y - x))} }{ e(u, r) = area(W) / area(intersect(W, W + y - x)) } where \eqn{W + y - x} is the result of shifting the window \eqn{W} by the vector \eqn{y - x}. The denominator is the area of the overlap between this shifted window and the original window. The function \code{edge.Trans} computes this edge correction weight. If \code{paired=TRUE}, then \code{X} and \code{Y} should contain the same number of points. The result is a vector containing the edge correction weights \code{e(X[i], Y[i])} for each \code{i}. If \code{paired=FALSE}, then the result is a matrix whose \code{i,j} entry gives the edge correction weight \code{e(X[i], Y[j])}. Computation is exact if the window is a rectangle. Otherwise, \itemize{ \item if \code{exact=TRUE}, the edge correction weights are computed exactly using \code{\link[spatstat.geom]{overlap.owin}}, which can be quite slow. \item if \code{exact=FALSE} (the default), the weights are computed rapidly by evaluating the set covariance function \code{\link[spatstat.geom]{setcov}} using the Fast Fourier Transform. } If any value of the edge correction weight exceeds \code{trim}, it is set to \code{trim}. The arguments \code{dx} and \code{dy} can be provided as an alternative to \code{X} and \code{Y}. If \code{paired=TRUE} then \code{dx,dy} should be vectors of equal length such that the vector difference of the \eqn{i}th pair is \code{c(dx[i], dy[i])}. If \code{paired=FALSE} then \code{dx,dy} should be matrices of the same dimensions, such that the vector difference between \code{X[i]} and \code{Y[j]} is \code{c(dx[i,j], dy[i,j])}. The argument \code{W} is needed. The value of \code{rmax.Trans} is the shortest distance from the origin \eqn{(0,0)} to the boundary of the support of the set covariance function of \code{W}. It is computed by pixel approximation using \code{\link[spatstat.geom]{setcov}}, unless \code{W} is a rectangle, when \code{rmax.Trans(W)} is the length of the shortest side of the rectangle. } \value{ Numeric vector or matrix. } \references{ Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. } \seealso{ \code{\link{rmax.Trans}}, \code{\link{edge.Ripley}}, \code{\link[spatstat.geom]{setcov}}, \code{\link{Kest}} } \examples{ v <- edge.Trans(cells) rmax.Trans(Window(cells)) } \author{\adrian and \rolf. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/pcf.Rd0000644000176200001440000000714114611073324015410 0ustar liggesusers\name{pcf} \alias{pcf} \title{Pair Correlation Function} \description{ Estimate the pair correlation function. } \usage{ pcf(X, \dots) } \arguments{ \item{X}{ Either the observed data point pattern, or an estimate of its \eqn{K} function, or an array of multitype \eqn{K} functions (see Details). } \item{\dots}{ Other arguments passed to the appropriate method. 
} } \value{ Either a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}) representing a pair correlation function, or a function array (object of class \code{"fasp"}, see \code{\link{fasp.object}}) representing an array of pair correlation functions. } \details{ The pair correlation function of a stationary point process is \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. We also apply the same definition to other variants of the classical \eqn{K} function, such as the multitype \eqn{K} functions (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}). For all these variants, the benchmark value of \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to \eqn{g(r) = 1}. This routine computes an estimate of \eqn{g(r)} either directly from a point pattern, or indirectly from an estimate of \eqn{K(r)} or one of its variants. This function is generic, with methods for the classes \code{"ppp"}, \code{"fv"} and \code{"fasp"}. If \code{X} is a point pattern (object of class \code{"ppp"}) then the pair correlation function is estimated using a traditional kernel smoothing method (Stoyan and Stoyan, 1994). See \code{\link{pcf.ppp}} for details. If \code{X} is a function value table (object of class \code{"fv"}), then it is assumed to contain estimates of the \eqn{K} function or one of its variants (typically obtained from \code{\link{Kest}} or \code{\link{Kinhom}}). This routine computes an estimate of \eqn{g(r)} using smoothing splines to approximate the derivative. See \code{\link{pcf.fv}} for details. If \code{X} is a function value array (object of class \code{"fasp"}), then it is assumed to contain estimates of several \eqn{K} functions (typically obtained from \code{\link{Kmulti}} or \code{\link{alltypes}}). This routine computes an estimate of \eqn{g(r)} for each cell in the array, using smoothing splines to approximate the derivatives. See \code{\link{pcf.fasp}} for details. } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcf.fv}}, \code{\link{pcf.fasp}}, \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kmulti}}, \code{\link{alltypes}} } \examples{ # ppp object X <- simdat \testonly{ X <- X[seq(1,npoints(X), by=4)] } p <- pcf(X) plot(p) # fv object K <- Kest(X) p2 <- pcf(K, spar=0.8, method="b") plot(p2) # multitype pattern; fasp object amaK <- alltypes(amacrine, "K") amap <- pcf(amaK, spar=1, method="b") plot(amap) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/plot.quadrattest.Rd0000644000176200001440000000323114643125462020160 0ustar liggesusers\name{plot.quadrattest} \alias{plot.quadrattest} \title{ Display the result of a quadrat counting test. } \description{ Given the result of a quadrat counting test, graphically display the quadrats that were used, the observed and expected counts, and the residual in each quadrat. 
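  The residual shown in each quadrat is the Pearson residual
  \eqn{(O-E)/\sqrt{E}}{(O-E)/sqrt(E)}, where \eqn{O} and \eqn{E} are the
  observed and expected counts for that quadrat.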
} \usage{ \method{plot}{quadrattest}(x, ..., textargs=list()) } \arguments{ \item{x}{ Object of class \code{"quadrattest"} containing the result of \code{\link{quadrat.test}}. } \item{\dots}{ Additional arguments passed to \code{\link[spatstat.geom]{plot.tess}} to control the display of the quadrats. } \item{textargs}{ List of additional arguments passed to \code{\link[graphics]{text.default}} to control the appearance of the text. } } \details{ This is the plot method for objects of class \code{"quadrattest"}. Such an object is produced by \code{\link{quadrat.test}} and represents the result of a \eqn{\chi^2}{chi^2} test for a spatial point pattern. The quadrats are first plotted using \code{\link[spatstat.geom]{plot.tess}}. Then in each quadrat, the observed and expected counts and the Pearson residual are displayed as text using \code{\link[graphics]{text.default}}. Observed count is displayed at top left; expected count at top right; and Pearson residual at bottom. } \value{ Null. } \examples{ plot(quadrat.test(swedishpines, 3)) } \seealso{ \code{\link{quadrat.test}}, \code{\link[spatstat.geom]{plot.tess}}, \code{\link[graphics]{text.default}}, \code{\link[spatstat.geom]{plot.quadratcount}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \concept{Goodness-of-fit} spatstat.explore/man/bw.diggle.Rd0000644000176200001440000000771114611073323016504 0ustar liggesusers\name{bw.diggle} \alias{bw.diggle} \title{ Cross Validated Bandwidth Selection for Kernel Density } \description{ Uses cross-validation to select a smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.diggle(X, ..., correction="good", hmax=NULL, nr=512, warn=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{Ignored.} \item{correction}{ Character string passed to \code{\link{Kest}} determining the edge correction to be used to calculate the \eqn{K} function. } \item{hmax}{ Numeric. Maximum value of bandwidth that should be considered. } \item{nr}{ Integer. Number of steps in the distance value \eqn{r} to use in computing numerical integrals. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the minimum of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. The bandwidth \eqn{\sigma}{\sigma} is chosen to minimise the mean-square error criterion defined by Diggle (1985). The algorithm uses the method of Berman and Diggle (1989) to compute the quantity \deqn{ M(\sigma) = \frac{\mbox{MSE}(\sigma)}{\lambda^2} - g(0) }{ M(\sigma) = MSE(\sigma)/\lambda^2 - g(0) } as a function of bandwidth \eqn{\sigma}{\sigma}, where \eqn{\mbox{MSE}(\sigma)}{MSE(\sigma)} is the mean squared error at bandwidth \eqn{\sigma}{\sigma}, while \eqn{\lambda}{\lambda} is the mean intensity, and \eqn{g} is the pair correlation function. See Diggle (2003, pages 115-118) for a summary of this method. The result is a numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted to show the (rescaled) mean-square error as a function of \code{sigma}. 
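  A minimal sketch of the intended workflow (using the \code{redwood}
  dataset from \pkg{spatstat.data} purely for illustration) is:
  \preformatted{
    b <- bw.diggle(redwood)     # select bandwidth by Diggle's criterion
    plot(b)                     # inspect the cross-validation curve
    plot(density(redwood, b))   # intensity estimate at the selected bandwidth
  }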
} \section{Definition of bandwidth}{ The smoothing parameter \code{sigma} returned by \code{bw.diggle} (and displayed on the horizontal axis of the plot) corresponds to \code{h/2}, where \code{h} is the smoothing parameter described in Diggle (2003, pages 116-118) and Berman and Diggle (1989). In those references, the smoothing kernel is the uniform density on the disc of radius \code{h}. In \code{\link{density.ppp}}, the smoothing kernel is the isotropic Gaussian density with standard deviation \code{sigma}. When replacing one kernel by another, the usual practice is to adjust the bandwidths so that the kernels have equal variance (cf. Diggle 2003, page 118). This implies that \code{sigma = h/2}. } \value{ A single numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} (see \code{\link[spatstat.explore]{bw.optim.object}}) which can be plotted to show the bandwidth selection criterion as a function of \code{sigma}. } \seealso{ \code{\link{density.ppp}}, \code{\link[spatstat.explore]{bw.optim.object}}. Alternative methods: \code{\link{bw.ppl}}, \code{\link{bw.scott}}, \code{\link{bw.CvL}}, \code{\link{bw.frac}}. } \examples{ attach(split(lansing)) b <- bw.diggle(hickory) plot(b, ylim=c(-2, 0), main="Cross validation for hickories") if(interactive()) { plot(density(hickory, b)) } } \references{ Berman, M. and Diggle, P. (1989) Estimating weighted integrals of the second-order intensity of a spatial point process. \emph{Journal of the Royal Statistical Society, series B} \bold{51}, 81--92. Diggle, P.J. (1985) A kernel method for smoothing point process data. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C) \bold{34} (1985) 138--147. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} \concept{Bandwidth selection} spatstat.explore/man/increment.fv.Rd0000644000176200001440000000166514611073324017243 0ustar liggesusers\name{increment.fv} \alias{increment.fv} \title{ Increments of a Function } \description{ Compute the change in the value of a function \code{f} when the function argument increases by \code{delta}. } \usage{ increment.fv(f, delta) } \arguments{ \item{f}{ Object of class \code{"fv"} representing a function. } \item{delta}{ Numeric. The increase in the value of the function argument. } } \details{ This command computes the new function \deqn{g(x) = f(x+h) - f(x-h)} where \code{h = delta/2}. The value of \eqn{g(x)} is the change in the value of \eqn{f} over an interval of length \code{delta} centred at \eqn{x}. } \value{ Another object of class \code{"fv"} compatible with \code{X}. } \author{\adrian \rolf and \ege } \seealso{ \code{\link{fv.object}}, \code{\link{deriv.fv}} } \examples{ plot(increment.fv(Kest(cells), 0.05)) } \keyword{spatial} \keyword{math} \keyword{nonparametric} spatstat.explore/man/as.data.frame.envelope.Rd0000644000176200001440000000265114611073323021060 0ustar liggesusers\name{as.data.frame.envelope} \alias{as.data.frame.envelope} \title{Coerce Envelope to Data Frame} \description{ Converts an envelope object to a data frame. } \usage{ \method{as.data.frame}{envelope}(x, \dots, simfuns=FALSE) } \arguments{ \item{x}{Envelope object (class \code{"envelope"}).} \item{\dots}{Ignored.} \item{simfuns}{Logical value indicating whether the result should include the values of the simulated functions that were used to build the envelope. 
} } \details{ This is a method for the generic function \code{\link{as.data.frame}} for the class of envelopes (see \code{\link{envelope}}. The result is a data frame with columns containing the values of the function argument (usually named \code{r}), the function estimate for the original point pattern data (\code{obs}), the upper and lower envelope limits (\code{hi} and \code{lo}), and possibly additional columns. If \code{simfuns=TRUE}, the result also includes columns of values of the simulated functions that were used to compute the envelope. This is possible only when the envelope was computed with the argument \code{savefuns=TRUE} in the call to \code{\link{envelope}}. } \value{ A data frame. } \examples{ E <- envelope(cells, nsim=5, savefuns=TRUE) tail(as.data.frame(E)) tail(as.data.frame(E, simfuns=TRUE)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} \concept{Envelope of simulations} spatstat.explore/man/nnorient.Rd0000644000176200001440000000675514611073324016506 0ustar liggesusers\name{nnorient} \alias{nnorient} \title{ Nearest Neighbour Orientation Distribution } \description{ Computes the distribution of the orientation of the vectors from each point to its nearest neighbour. } \usage{ nnorient(X, \dots, cumulative = FALSE, correction, k = 1, unit = c("degree", "radian"), domain = NULL, ratio = FALSE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link{circdensity}} to control the kernel smoothing, if \code{cumulative=FALSE}. } \item{cumulative}{ Logical value specifying whether to estimate the probability density (\code{cumulative=FALSE}, the default) or the cumulative distribution function (\code{cumulative=TRUE}). } \item{correction}{ Character vector specifying edge correction or corrections. Options are \code{"none"}, \code{"bord.modif"}, \code{"good"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } \item{k}{ Integer. The \eqn{k}th nearest neighbour will be used. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{unit}{ Unit in which the angles should be expressed. Either \code{"degree"} or \code{"radian"}. } \item{domain}{ Optional window. The first point \eqn{x_i}{x[i]} of each pair of points will be constrained to lie in \code{domain}. } } \details{ This algorithm considers each point in the pattern \code{X} and finds its nearest neighbour (or \eqn{k}th nearest neighour). The \emph{direction} of the arrow joining the data point to its neighbour is measured, as an angle in degrees or radians, anticlockwise from the \eqn{x} axis. If \code{cumulative=FALSE} (the default), a kernel estimate of the probability density of the angles is calculated using \code{\link{circdensity}}. This is the function \eqn{\vartheta(\phi)}{theta(phi)} defined in Illian et al (2008), equation (4.5.3), page 253. If \code{cumulative=TRUE}, then the cumulative distribution function of these angles is calculated. In either case the result can be plotted as a rose diagram by \code{\link{rose}}, or as a function plot by \code{\link{plot.fv}}. The algorithm gives each observed direction a weight, determined by an edge correction, to adjust for the fact that some interpoint distances are more likely to be observed than others. The choice of edge correction or corrections is determined by the argument \code{correction}. 
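  For example, a minimal sketch (using the \code{redwood} dataset from
  \pkg{spatstat.data}; the choice of edge correction here is purely
  illustrative):
  \preformatted{
    f <- nnorient(redwood, correction="bord.modif")
    rose(f, col="grey")
  }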
It is also possible to calculate an estimate of the probability density from the cumulative distribution function, by numerical differentiation. Use \code{\link{deriv.fv}} with the argument \code{Dperiodic=TRUE}. } \value{ A function value table (object of class \code{"fv"}) containing the estimates of the probability density or the cumulative distribution function of angles, in degrees (if \code{unit="degree"}) or radians (if \code{unit="radian"}). } \references{ Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008) \emph{Statistical Analysis and Modelling of Spatial Point Patterns.} Wiley. } \seealso{ \code{\link{pairorient}} } \examples{ rose(nnorient(redwood, adjust=0.6), col="grey") plot(CDF <- nnorient(redwood, cumulative=TRUE)) } \author{\adrian \rolf and \ege } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/bw.CvLHeat.Rd0000644000176200001440000000444414611073322016536 0ustar liggesusers\name{bw.CvLHeat} \alias{bw.CvLHeat} \title{ Bandwidth Selection for Diffusion Smoother by Cronie-van Lieshout Rule } \description{ Selects an optimal bandwidth for diffusion smoothing using the Cronie-van Lieshout rule. } \usage{ bw.CvLHeat(X, \dots, srange=NULL, ns=16, sigma=NULL, leaveoneout=TRUE, verbose = TRUE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link{densityHeat.ppp}}. } \item{srange}{ Numeric vector of length 2 specifying a range of bandwidths to be considered. } \item{ns}{ Integer. Number of candidate bandwidths to be considered. } \item{sigma}{ Maximum smoothing bandwidth. A numeric value, or a pixel image, or a \code{function(x,y)}. Alternatively a numeric vector containing a sequence of candidate bandwidths. } \item{leaveoneout}{ Logical value specifying whether intensity values at data points should be estimated using the leave-one-out rule. } \item{verbose}{ Logical value specifying whether to print progress reports. } } \details{ This algorithm selects the optimal global bandwidth for kernel estimation of intensity for the dataset \code{X} using diffusion smoothing \code{\link{densityHeat.ppp}}. If \code{sigma} is a numeric value, the algorithm finds the optimal bandwidth \code{tau <= sigma}. If \code{sigma} is a pixel image or function, the algorithm finds the optimal fraction \code{0 < f <= 1} such that smoothing with \code{f * sigma} would be optimal. } \value{ A numerical value giving the selected bandwidth (if \code{sigma} was a numeric value) or the selected fraction of the maximum bandwidth (if \code{sigma} was a pixel image or function). The result also belongs to the class \code{"bw.optim"} which can be plotted. } \author{ Adrian Baddeley. } \seealso{ \code{\link{bw.pplHeat}} for an alternative method. \code{\link{densityHeat.ppp}} } \examples{ online <- interactive() if(!online) op <- spatstat.options(npixel=32) f <- function(x,y) { dnorm(x, 2.3, 0.1) * dnorm(y, 2.0, 0.2) } X <- rpoint(15, f, win=letterR) plot(X) b <- bw.CvLHeat(X, sigma=0.25) b plot(b) if(!online) spatstat.options(op) } \keyword{spatial} \keyword{smooth} \concept{Bandwidth selection} spatstat.explore/man/sharpen.Rd0000644000176200001440000000642314611073325016303 0ustar liggesusers\name{sharpen} \alias{sharpen} \alias{sharpen.ppp} \title{Data Sharpening of Point Pattern} \description{ Performs Choi-Hall data sharpening of a spatial point pattern. 
} \usage{ sharpen(X, \dots) \method{sharpen}{ppp}(X, sigma=NULL, \dots, varcov=NULL, edgecorrect=FALSE) } \arguments{ \item{X}{A marked point pattern (object of class \code{"ppp"}).} \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel. Incompatible with \code{sigma}. } \item{edgecorrect}{ Logical value indicating whether to apply edge effect bias correction. } \item{\dots}{Arguments passed to \code{\link{density.ppp}} to control the pixel resolution of the result.} } \details{ Choi and Hall (2001) proposed a procedure for \emph{data sharpening} of spatial point patterns. This procedure is appropriate for earthquake epicentres and other point patterns which are believed to exhibit strong concentrations of points along a curve. Data sharpening causes such points to concentrate more tightly along the curve. If the original data points are \eqn{X_1, \ldots, X_n}{X[1],..., X[n]} then the sharpened points are \deqn{ \hat X_i = \frac{\sum_j X_j k(X_j-X_i)}{\sum_j k(X_j - X_i)} }{ X^[i] = (sum[j] X[j] * k(X[j] - X[i]))/(sum[j] k(X[j] - X[i])) } where \eqn{k} is a smoothing kernel in two dimensions. Thus, the new point \eqn{\hat X_i}{X^[i]} is a vector average of the nearby points \eqn{X[j]}. The function \code{sharpen} is generic. It currently has only one method, for two-dimensional point patterns (objects of class \code{"ppp"}). If \code{sigma} is given, the smoothing kernel is the isotropic two-dimensional Gaussian density with standard deviation \code{sigma} in each axis. If \code{varcov} is given, the smoothing kernel is the Gaussian density with variance-covariance matrix \code{varcov}. The data sharpening procedure tends to cause the point pattern to contract away from the boundary of the window. That is, points \eqn{X_i}{X[i]} that lie `quite close to the edge of the window of the point pattern tend to be displaced inward. If \code{edgecorrect=TRUE} then the algorithm is modified to correct this vector bias. } \value{ A point pattern (object of class \code{"ppp"}) in the same window as the original pattern \code{X}, and with the same marks as \code{X}. } \seealso{ \code{\link{density.ppp}}, \code{\link{Smooth.ppp}}. } \examples{ X <- unmark(shapley) \dontshow{ if(!(interactive())) X <- rthin(X, 0.05) } Y <- sharpen(X, sigma=0.5) Z <- sharpen(X, sigma=0.5, edgecorrect=TRUE) opa <- par(mar=rep(0.2, 4)) plot(solist(X, Y, Z), main= " ", main.panel=c("data", "sharpen", "sharpen, correct"), pch=".", equal.scales=TRUE, mar.panel=0.2) par(opa) } \references{ Choi, E. and Hall, P. (2001) Nonparametric analysis of earthquake point-process data. In M. de Gunst, C. Klaassen and A. van der Vaart (eds.) \emph{State of the art in probability and statistics: Festschrift for Willem R. van Zwet}, Institute of Mathematical Statistics, Beachwood, Ohio. Pages 324--344. } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/reload.or.compute.Rd0000644000176200001440000000750114737444216020213 0ustar liggesusers\name{reload.or.compute} \alias{reload.or.compute} \title{ Perform Computations or Retrieve Results From File } \description{ This utility either performs computations and saves the results in a file, or retrieves the results of previous computations stored in a file. If the designated file does not yet exist, the expression will be evaluated, and the results will be saved in the file. If the file already exists, the results will be re-loaded from the file. 
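  A minimal sketch of the intended usage (the file name \code{"Kenv.rda"}
  and the computation are illustrative only):
  \preformatted{
    reload.or.compute("Kenv.rda", {
      ## slow computation, run only if the file does not already exist
      E <- envelope(cells, Kest, nsim=39)
    })
    plot(E)
  }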
} \usage{ reload.or.compute(filename, expr, objects = NULL, context = parent.frame(), destination = parent.frame(), force=FALSE, verbose=TRUE, exclude=NULL) } \arguments{ \item{filename}{ Name of data file. A character string. } \item{expr}{ \R language expression to be evaluated. } \item{objects}{ Optional character vector of names of objects to be saved in \code{filename} after evaluating \code{expr}, or names of objects that should be present in \code{filename} when loaded. } \item{exclude}{ Optional character vector of names of objects that should \bold{not} be saved in \code{filename} and are \bold{not} expected to be present in \code{filename}. } \item{context}{ Environment containing objects that are mentioned in \code{expr} (other than objects in the global environment). } \item{destination}{ Environment into which the resulting objects should be assigned. } \item{force}{ Logical value indicating whether to perform the computation in any case. } \item{verbose}{ Logical value indicating whether to print a message indicating whether the data were recomputed or reloaded from the file. } } \details{ This facility is useful for saving, and later re-loading, the results of time-consuming computations. It would typically be used in an \R script file or an \code{\link[utils]{Sweave}} document. If the file called \code{filename} does not yet exist (or if \code{force=TRUE}), then \code{expr} will be evaluated and the results will be saved in \code{filename} using \code{\link[base]{save}}. By default, all objects that were created by evaluating the expression will be saved in the file. The optional argument \code{objects} specifies which results should be saved to the file. The optional argument \code{exclude} specifies results which should \emph{not} be saved. If the file called \code{filename} already exists (and if \code{force=FALSE}, the default), then this file will be loaded into \R using \code{\link{load}}. The optional argument \code{objects} specifies the names of objects that must be present in the file; a warning is issued if any of them are missing. The resulting objects (either evaluated or loaded from file) can be assigned into any desired \code{destination} environment. The default behaviour is equivalent to evaluating \code{expr} in the current environment. If \code{force=TRUE} then \code{expr} will be evaluated (regardless of whether the file already exists or not) and the results will be saved in \code{filename}, overwriting any previously-existing file with that name. This is a convenient way to force the code to re-compute everything in an \R script file or \code{\link[utils]{Sweave}} document. } \value{ Character vector (invisible) giving the names of the objects computed or loaded. } \examples{ ## Demonstration using a temporary file ## (For real applications, use a permanent file in your own filespace) myfile <- paste0(tempdir(), .Platform$file.sep, "mydata.rda") reload.or.compute(myfile, { # some very long computation ending with .. x <- 42 intermediateWorking <- 12345 y <- sqrt(x) }, exclude="intermediateWorking") ## the values x and y are saved } \author{ \adrian and \rolf. } \keyword{utilities} spatstat.explore/man/envelope.Rd0000644000176200001440000007463614650323373016477 0ustar liggesusers\name{envelope} \alias{envelope} \alias{envelope.ppp} \title{Simulation Envelopes of Summary Function} \description{ Computes simulation envelopes of a summary function. 
} \usage{ envelope(Y, fun, \dots) \method{envelope}{ppp}(Y, fun=Kest, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) } \arguments{ \item{Y}{ Object containing point pattern data. A point pattern (object of class \code{"ppp"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or \code{"slrm"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{nsim}{ Number of simulated point patterns to be generated when computing the envelopes. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{\dots}{ Extra arguments passed to \code{fun}. } \item{funargs}{ A list, containing extra arguments to be passed to \code{fun}. } \item{funYargs}{ Optional. A list, containing extra arguments to be passed to \code{fun} when applied to the original data \code{Y} only. } \item{simulate}{ Optional. Specifies how to generate the simulated point patterns. If \code{simulate} is an expression in the R language, then this expression will be evaluated \code{nsim} times, to obtain \code{nsim} point patterns which are taken as the simulated patterns from which the envelopes are computed. If \code{simulate} is a function, then this function will be repeatedly applied to the data pattern \code{Y} to obtain \code{nsim} simulated patterns. If \code{simulate} is a list of point patterns, then the entries in this list will be treated as the simulated patterns from which the envelopes are computed. Alternatively \code{simulate} may be an object produced by the \code{envelope} command: see Details. } \item{fix.n}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points as the original data pattern. This option is currently not available for \code{envelope.kppm}. } \item{fix.marks}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points \emph{and} the same marks as the original data pattern. In a multitype point pattern this means that the simulated patterns will have the same number of points \emph{of each type} as the original data. This option is currently not available for \code{envelope.kppm}. } \item{verbose}{ Logical flag indicating whether to print progress reports during the simulations. } \item{clipdata}{ Logical flag indicating whether the data point pattern should be clipped to the same window as the simulated patterns, before the summary function for the data is computed. This should usually be \code{TRUE} to ensure that the data and simulations are properly comparable. } \item{transform}{ Optional. A transformation to be applied to the function values, before the envelopes are computed. An expression object (see Details). } \item{global}{ Logical flag indicating whether envelopes should be pointwise (\code{global=FALSE}) or simultaneous (\code{global=TRUE}). } \item{ginterval}{ Optional. A vector of length 2 specifying the interval of \eqn{r} values for the simultaneous critical envelopes. Only relevant if \code{global=TRUE}. 
} \item{use.theory}{ Logical value indicating whether to use the theoretical value, computed by \code{fun}, as the reference value for simultaneous envelopes. Applicable only when \code{global=TRUE}. Default is \code{use.theory=TRUE} if \code{Y} is a point pattern, or a point process model equivalent to Complete Spatial Randomness, and \code{use.theory=FALSE} otherwise. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{side="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{side="less"}) or a one-sided test with an upper critical boundary (\code{side="greater"}). } \item{scale}{ Optional. Scaling function for global envelopes. A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}, when computing the global envelopes. Applicable only when \code{global=TRUE}. Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the maximum deviation is computed. The resulting global envelopes will have width proportional to \code{scale(r)}. } \item{clamp}{ Logical value indicating how to compute envelopes when \code{alternative="less"} or \code{alternative="greater"}. Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. If \code{clamp=FALSE} (the default), these values are not changed. If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } \item{nsim2}{ Number of extra simulated point patterns to be generated if it is necessary to use simulation to estimate the theoretical mean of the summary function. Only relevant when \code{global=TRUE} and the simulations are not based on CSR. } \item{VARIANCE}{ Logical. If \code{TRUE}, critical envelopes will be calculated as sample mean plus or minus \code{nSD} times sample standard deviation. } \item{nSD}{ Number of estimated standard deviations used to determine the critical envelopes, if \code{VARIANCE=TRUE}. } \item{Yname}{ Character string that should be used as the name of the data point pattern \code{Y} when printing or plotting the results. } \item{maxnerr}{ Maximum number of rejected patterns. If \code{fun} yields a fatal error when applied to a simulated point pattern (for example, because the pattern is empty and \code{fun} requires at least one point), the pattern will be rejected and a new random point pattern will be generated. If this happens more than \code{maxnerr} times, the algorithm will give up. } \item{rejectNA}{ Logical value specifying whether to reject a simulated pattern if the resulting values of \code{fun} are all equal to \code{NA}, \code{NaN} or infinite. If \code{FALSE} (the default), then simulated patterns are only rejected when \code{fun} gives a fatal error. } \item{silent}{ Logical value specifying whether to print a report each time a simulated pattern is rejected. } \item{do.pwrong}{ Logical. If \code{TRUE}, the algorithm will also estimate the true significance level of the \dQuote{wrong} test (the test that declares the summary function for the data to be significant if it lies outside the \emph{pointwise} critical boundary at any point). 
This estimate is printed when the result is printed. } \item{envir.simul}{ Environment in which to evaluate the expression \code{simulate}, if not the current environment. } } \value{ An object of class \code{"envelope"} and \code{"fv"}, see \code{\link[spatstat.explore]{fv.object}}, which can be printed and plotted directly. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the summary function \code{fun} has been estimated } \item{obs}{ values of the summary function for the data point pattern } \item{lo}{ lower envelope of simulations } \item{hi}{ upper envelope of simulations } and \emph{either} \item{theo}{ theoretical value of the summary function under CSR (Complete Spatial Randomness, a uniform Poisson point process) if the simulations were generated according to CSR } \item{mmean}{ estimated theoretical value of the summary function, computed by averaging simulated values, if the simulations were not generated according to CSR. } Additionally, if \code{savepatterns=TRUE}, the return value has an attribute \code{"simpatterns"} which is a list containing the \code{nsim} simulated patterns. If \code{savefuns=TRUE}, the return value has an attribute \code{"simfuns"} which is an object of class \code{"fv"} containing the summary functions computed for each of the \code{nsim} simulated patterns. } \details{ The \code{envelope} command performs simulations and computes envelopes of a summary statistic based on the simulations. The result is an object that can be plotted to display the envelopes. The envelopes can be used to assess the goodness-of-fit of a point process model to point pattern data. For the most basic use, if you have a point pattern \code{X} and you want to test Complete Spatial Randomness (CSR), type \code{plot(envelope(X, Kest,nsim=39))} to see the \eqn{K} function for \code{X} plotted together with the envelopes of the \eqn{K} function for 39 simulations of CSR. The \code{envelope} function is generic, with methods for the classes \code{"ppp"}, \code{"ppm"}, \code{"kppm"} and \code{"slrm"} described here. There are also methods for the classes \code{"pp3"}, \code{"lpp"} and \code{"lppm"} which are described separately under \code{\link[spatstat.explore]{envelope.pp3}} and \code{envelope.lpp}. Envelopes can also be computed from other envelopes, using \code{\link[spatstat.explore]{envelope.envelope}}. To create simulation envelopes, the command \code{envelope(Y, ...)} first generates \code{nsim} random point patterns in one of the following ways. \itemize{ \item If \code{Y} is a point pattern (an object of class \code{"ppp"}) and \code{simulate=NULL}, then we generate \code{nsim} simulations of Complete Spatial Randomness (i.e. \code{nsim} simulated point patterns each being a realisation of the uniform Poisson point process) with the same intensity as the pattern \code{Y}. (If \code{Y} is a multitype point pattern, then the simulated patterns are also given independent random marks; the probability distribution of the random marks is determined by the relative frequencies of marks in \code{Y}.) \item If \code{Y} is a fitted point process model (an object of class \code{"ppm"} or \code{"kppm"} or \code{"slrm"}) and \code{simulate=NULL}, then this routine generates \code{nsim} simulated realisations of that model. \item If \code{simulate} is supplied, then it determines how the simulated point patterns are generated. 
It may be either \itemize{ \item an expression in the R language, typically containing a call to a random generator. This expression will be evaluated \code{nsim} times to yield \code{nsim} point patterns. For example if \code{simulate=expression(runifpoint(100))} then each simulated pattern consists of exactly 100 independent uniform random points. \item a function in the R language, typically containing a call to a random generator. This function will be applied repeatedly to the original data pattern \code{Y} to yield \code{nsim} point patterns. For example if \code{simulate=\link[spatstat.random]{rlabel}} then each simulated pattern was generated by evaluating \code{\link[spatstat.random]{rlabel}(Y)} and consists of a randomly-relabelled version of \code{Y}. \item a list of point patterns. The entries in this list will be taken as the simulated patterns. \item an object of class \code{"envelope"}. This should have been produced by calling \code{envelope} with the argument \code{savepatterns=TRUE}. The simulated point patterns that were saved in this object will be extracted and used as the simulated patterns for the new envelope computation. This makes it possible to plot envelopes for two different summary functions based on exactly the same set of simulated point patterns. } } The summary statistic \code{fun} is applied to each of these simulated patterns. Typically \code{fun} is one of the functions \code{Kest}, \code{Gest}, \code{Fest}, \code{Jest}, \code{pcf}, \code{Kcross}, \code{Kdot}, \code{Gcross}, \code{Gdot}, \code{Jcross}, \code{Jdot}, \code{Kmulti}, \code{Gmulti}, \code{Jmulti} or \code{Kinhom}. It may also be a character string containing the name of one of these functions. The statistic \code{fun} can also be a user-supplied function; if so, then it must have arguments \code{X} and \code{r} like those in the functions listed above, and it must return an object of class \code{"fv"}. Upper and lower critical envelopes are computed in one of the following ways: \describe{ \item{pointwise:}{by default, envelopes are calculated pointwise (i.e. for each value of the distance argument \eqn{r}), by sorting the \code{nsim} simulated values, and taking the \code{m}-th lowest and \code{m}-th highest values, where \code{m = nrank}. For example if \code{nrank=1}, the upper and lower envelopes are the pointwise maximum and minimum of the simulated values. The pointwise envelopes are \bold{not} \dQuote{confidence bands} for the true value of the function! Rather, they specify the critical points for a Monte Carlo test (Ripley, 1981). The test is constructed by choosing a \emph{fixed} value of \eqn{r}, and rejecting the null hypothesis if the observed function value lies outside the envelope \emph{at this value of} \eqn{r}. This test has exact significance level \code{alpha = 2 * nrank/(1 + nsim)}. } \item{simultaneous:}{if \code{global=TRUE}, then the envelopes are determined as follows. First we calculate the theoretical mean value of the summary statistic (if we are testing CSR, the theoretical value is supplied by \code{fun}; otherwise we perform a separate set of \code{nsim2} simulations, compute the average of all these simulated values, and take this average as an estimate of the theoretical mean value). Then, for each simulation, we compare the simulated curve to the theoretical curve, and compute the maximum absolute difference between them (over the interval of \eqn{r} values specified by \code{ginterval}). 
This gives a deviation value \eqn{d_i}{d[i]} for each of the \code{nsim} simulations. Finally we take the \code{m}-th largest of the deviation values, where \code{m=nrank}, and call this \code{dcrit}. Then the simultaneous envelopes are of the form \code{lo = expected - dcrit} and \code{hi = expected + dcrit} where \code{expected} is either the theoretical mean value \code{theo} (if we are testing CSR) or the estimated theoretical value \code{mmean} (if we are testing another model). The simultaneous critical envelopes have constant width \code{2 * dcrit}. The simultaneous critical envelopes allow us to perform a different Monte Carlo test (Ripley, 1981). The test rejects the null hypothesis if the graph of the observed function lies outside the envelope \bold{at any value of} \eqn{r}. This test has exact significance level \code{alpha = nrank/(1 + nsim)}. This test can also be performed using \code{\link[spatstat.explore]{mad.test}}. } \item{based on sample moments:}{if \code{VARIANCE=TRUE}, the algorithm calculates the (pointwise) sample mean and sample variance of the simulated functions. Then the envelopes are computed as mean plus or minus \code{nSD} standard deviations. These envelopes do not have an exact significance interpretation. They are a naive approximation to the critical points of the Neyman-Pearson test assuming the summary statistic is approximately Normally distributed. } } The return value is an object of class \code{"fv"} containing the summary function for the data point pattern, the upper and lower simulation envelopes, and the theoretical expected value (exact or estimated) of the summary function for the model being tested. It can be plotted using \code{\link[spatstat.explore]{plot.envelope}}. If \code{VARIANCE=TRUE} then the return value also includes the sample mean, sample variance and other quantities. Arguments can be passed to the function \code{fun} through \code{...}. This means that you simply specify these arguments in the call to \code{envelope}, and they will be passed to \code{fun}. In particular, the argument \code{correction} determines the edge correction to be used to calculate the summary statistic. See the section on Edge Corrections, and the Examples. Arguments can also be passed to the function \code{fun} through the list \code{funargs}. This mechanism is typically used if an argument of \code{fun} has the same name as an argument of \code{envelope}. The list \code{funargs} should contain entries of the form \code{name=value}, where each \code{name} is the name of an argument of \code{fun}. There is also an option, rarely used, in which different function arguments are used when computing the summary function for the data \code{Y} and for the simulated patterns. If \code{funYargs} is given, it will be used when the summary function for the data \code{Y} is computed, while \code{funargs} will be used when computing the summary function for the simulated patterns. This option is only needed in rare cases: usually the basic principle requires that the data and simulated patterns must be treated equally, so that \code{funargs} and \code{funYargs} should be identical. If \code{Y} is a fitted cluster point process model (object of class \code{"kppm"}), and \code{simulate=NULL}, then the model is simulated directly using \code{\link[spatstat.model]{simulate.kppm}}. 
If \code{Y} is a fitted Gibbs point process model (object of class \code{"ppm"}), and \code{simulate=NULL}, then the model is simulated by running the Metropolis-Hastings algorithm \code{\link[spatstat.random]{rmh}}. Complete control over this algorithm is provided by the arguments \code{start} and \code{control} which are passed to \code{\link[spatstat.random]{rmh}}. For simultaneous critical envelopes (\code{global=TRUE}) the following options are also useful: \describe{ \item{\code{ginterval}}{determines the interval of \eqn{r} values over which the deviation between curves is calculated. It should be a numeric vector of length 2. There is a sensible default (namely, the recommended plotting interval for \code{fun(X)}, or the range of \code{r} values if \code{r} is explicitly specified). } \item{\code{transform}}{specifies a transformation of the summary function \code{fun} that will be carried out before the deviations are computed. Such transforms are useful if \code{global=TRUE} or \code{VARIANCE=TRUE}. The \code{transform} must be an expression object using the symbol \code{.} to represent the function value (and possibly other symbols recognised by \code{\link[spatstat.explore]{with.fv}}). For example, the conventional way to normalise the \eqn{K} function (Ripley, 1981) is to transform it to the \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/\pi)} and this is implemented by setting \code{transform=expression(sqrt(./pi))}. } } It is also possible to extract the summary functions for each of the individual simulated point patterns, by setting \code{savefuns=TRUE}. Then the return value also has an attribute \code{"simfuns"} containing all the summary functions for the individual simulated patterns. It is an \code{"fv"} object containing functions named \code{sim1, sim2, ...} representing the \code{nsim} summary functions. It is also possible to save the simulated point patterns themselves, by setting \code{savepatterns=TRUE}. Then the return value also has an attribute \code{"simpatterns"} which is a list of length \code{nsim} containing all the simulated point patterns. See \code{\link[spatstat.explore]{plot.envelope}} and \code{\link[spatstat.explore]{plot.fv}} for information about how to plot the envelopes. Different envelopes can be recomputed from the same data using \code{\link[spatstat.explore]{envelope.envelope}}. Envelopes can be combined using \code{\link[spatstat.explore]{pool.envelope}}. } \section{Errors and warnings}{ An error may be generated if one of the simulations produces a point pattern that is empty, or is otherwise unacceptable to the function \code{fun}. The upper envelope may be \code{NA} (plotted as plus or minus infinity) if some of the function values computed for the simulated point patterns are \code{NA}. Whether this occurs will depend on the function \code{fun}, but it usually happens when the simulated point pattern does not contain enough points to compute a meaningful value. } \section{Confidence intervals}{ Simulation envelopes do \bold{not} compute confidence intervals; they generate significance bands. If you really need a confidence interval for the true summary function of the point process, use \code{\link[spatstat.explore]{lohboot}}. See also \code{\link[spatstat.explore]{varblock}}. } \section{Edge corrections}{ It is common to apply a correction for edge effects when calculating a summary function such as the \eqn{K} function. Typically the user has a choice between several possible edge corrections. 
In a call to \code{envelope}, the user can specify the edge correction to be applied in \code{fun}, using the argument \code{correction}. See the Examples below. \describe{ \item{Summary functions in \pkg{spatstat}}{ Summary functions that are available in \pkg{spatstat}, such as \code{\link[spatstat.explore]{Kest}}, \code{\link[spatstat.explore]{Gest}} and \code{\link[spatstat.explore]{pcf}}, have a standard argument called \code{correction} which specifies the name of one or more edge corrections. The list of available edge corrections is different for each summary function, and may also depend on the kind of window in which the point pattern is recorded. In the case of \code{Kest} (the default and most frequently used value of \code{fun}) the best edge correction is Ripley's isotropic correction if the window is rectangular or polygonal, and the translation correction if the window is a binary mask. See the help files for the individual functions for more information. All the summary functions in \pkg{spatstat} recognise the option \code{correction="best"} which gives the \dQuote{best} (most accurate) available edge correction for that function. In a call to \code{envelope}, if \code{fun} is one of the summary functions provided in \pkg{spatstat}, then the default is \code{correction="best"}. This means that \emph{by default, the envelope will be computed using the \dQuote{best} available edge correction}. The user can override this default by specifying the argument \code{correction}. For example the computation can be accelerated by choosing another edge correction which is less accurate than the \dQuote{best} one, but faster to compute. } \item{User-written summary functions}{ If \code{fun} is a function written by the user, then \code{envelope} has to guess what to do. If \code{fun} has an argument called \code{correction}, or has \code{\dots} arguments, then \code{envelope} assumes that the function can handle a correction argument. To compute the envelope, \code{fun} will be called with a \code{correction} argument. The default is \code{correction="best"}, unless overridden in the call to \code{envelope}. Otherwise, if \code{fun} does not have an argument called \code{correction} and does not have \code{\dots} arguments, then \code{envelope} assumes that the function \emph{cannot} handle a correction argument. To compute the envelope, \code{fun} is called without a correction argument. } } } \references{ Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Arnold, 2003. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link[spatstat.explore]{dclf.test}}, \code{\link[spatstat.explore]{mad.test}} for envelope-based tests. \code{\link[spatstat.explore]{fv.object}}, \code{\link[spatstat.explore]{plot.envelope}}, \code{\link[spatstat.explore]{plot.fv}}, \code{\link[spatstat.explore]{envelope.envelope}}, \code{\link[spatstat.explore]{pool.envelope}} for handling envelopes. There are also methods for \code{print} and \code{summary}. 
\code{\link[spatstat.explore]{Kest}}, \code{\link[spatstat.explore]{Gest}}, \code{\link[spatstat.explore]{Fest}}, \code{\link[spatstat.explore]{Jest}}, \code{\link[spatstat.explore]{pcf}}, \code{\link[spatstat.geom]{ppp}}, \code{\link[spatstat.model]{ppm}}, \code{\link[spatstat.random]{default.expand}} } \examples{ X <- simdat online <- interactive() Nsim <- if(online) 19 else 3 # Envelope of K function under CSR plot(envelope(X, nsim=Nsim)) # Translation edge correction (this is also FASTER): if(online) { plot(envelope(X, correction="translate")) } else { E <- envelope(X, nsim=Nsim, correction="translate") } # Global envelopes if(online) { plot(envelope(X, Lest, global=TRUE)) plot(envelope(X, Kest, global=TRUE, scale=function(r) { r })) } else { E <- envelope(X, Lest, nsim=Nsim, global=TRUE) E <- envelope(X, Kest, nsim=Nsim, global=TRUE, scale=function(r) { r }) E summary(E) } # Envelope of G function under CSR if(online) { plot(envelope(X, Gest)) } else { E <- envelope(X, Gest, correction="rs", nsim=Nsim) } # Envelope of L function under CSR # L(r) = sqrt(K(r)/pi) if(online) { E <- envelope(X, Kest) } else { E <- envelope(X, Kest, correction="border", nsim=Nsim) } plot(E, sqrt(./pi) ~ r) # Simultaneous critical envelope for L function # (alternatively, use Lest) if(online) { plot(envelope(X, Kest, transform=expression(sqrt(./pi)), global=TRUE)) } else { E <- envelope(X, Kest, nsim=Nsim, correction="border", transform=expression(sqrt(./pi)), global=TRUE) } ## One-sided envelope if(online) { plot(envelope(X, Lest, alternative="less")) } else { E <- envelope(X, Lest, nsim=Nsim, alternative="less") } # How to pass arguments needed to compute the summary functions: # We want envelopes for Jcross(X, "A", "B") # where "A" and "B" are types of points in the dataset 'demopat' if(online) { plot(envelope(demopat, Jcross, i="A", j="B")) } else { plot(envelope(demopat, Jcross, correction="rs", i="A", j="B", nsim=Nsim)) } # Use of `simulate' expression if(online) { plot(envelope(cells, Gest, simulate=expression(runifpoint(42)))) plot(envelope(cells, Gest, simulate=expression(rMaternI(100,0.02)))) } else { plot(envelope(cells, Gest, correction="rs", simulate=expression(runifpoint(42)), nsim=Nsim)) plot(envelope(cells, Gest, correction="rs", simulate=expression(rMaternI(100, 0.02)), nsim=Nsim, global=TRUE)) } # Use of `simulate' function if(online) { plot(envelope(amacrine, Kcross, simulate=rlabel)) } else { plot(envelope(amacrine, Kcross, simulate=rlabel, nsim=Nsim)) } # Envelope under random toroidal shifts if(online) { plot(envelope(amacrine, Kcross, i="on", j="off", simulate=expression(rshift(amacrine, radius=0.25)))) } # Envelope under random shifts with erosion if(online) { plot(envelope(amacrine, Kcross, i="on", j="off", simulate=expression(rshift(amacrine, radius=0.1, edge="erode")))) } # Note that the principle of symmetry, essential to the validity of # simulation envelopes, requires that both the observed and # simulated patterns be subjected to the same method of intensity # estimation. In the following example it would be incorrect to set the # argument 'lambda=red.dens' in the envelope command, because this # would mean that the inhomogeneous K functions of the simulated # patterns would be computed using the intensity function estimated # from the original redwood data, violating the symmetry. There is # still a concern about the fact that the simulations are generated # from a model that was fitted to the data; this is only a problem in # small datasets. 
if(online) { red.dens <- density(redwood, sigma=bw.diggle, positive=TRUE) plot(envelope(redwood, Kinhom, sigma=bw.diggle, simulate=expression(rpoispp(red.dens)))) } # Precomputed list of point patterns if(online) { nX <- npoints(X) PatList <- list() for(i in 1:Nsim) PatList[[i]] <- runifpoint(nX) E <- envelope(X, Kest, nsim=19, simulate=PatList) } else { PatList <- list() for(i in 1:Nsim) PatList[[i]] <- runifpoint(10) } E <- envelope(X, Kest, nsim=Nsim, simulate=PatList) # re-using the same point patterns EK <- envelope(X, Kest, nsim=Nsim, savepatterns=TRUE) EG <- envelope(X, Gest, nsim=Nsim, simulate=EK) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} \concept{Goodness-of-fit} \concept{Test of randomness} \concept{Envelope of simulations} spatstat.explore/man/markcorr.Rd0000644000176200001440000002704014643125461016465 0ustar liggesusers\name{markcorr} \alias{markcorr} \title{ Mark Correlation Function } \description{ Estimate the marked correlation function of a marked point pattern. } \usage{ markcorr(X, f = function(m1, m2) { m1 * m2}, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, weights=NULL, f1=NULL, normalise=TRUE, fargs=NULL, internal=NULL) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link[spatstat.geom]{as.ppp}}. } \item{f}{Optional. Test function \eqn{f} used in the definition of the mark correlation function. An \R function with at least two arguments. There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{weights}{ Optional. Numeric weights for each data point in \code{X}. A numeric vector, a pixel image, or a \code{function(x,y)}. Alternatively, an \code{expression} to be evaluated to yield the weights; the expression may involve the variables \code{x,y,marks} representing the coordinates and marks of\code{X}. } \item{f1}{ An alternative to \code{f}. If this argument is given, then \eqn{f} is assumed to take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)}. } \item{normalise}{ If \code{normalise=FALSE}, compute only the numerator of the expression for the mark correlation. } \item{fargs}{ Optional. A list of extra arguments to be passed to the function \code{f} or \code{f1}. } \item{internal}{Do not use this argument.} } \value{ A function value table (object of class \code{"fv"}) or a list of function value tables, one for each column of marks. 
An object of class \code{"fv"} (see \code{\link{fv.object}}) is essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} has been estimated } \item{theo}{the theoretical value of \eqn{k_f(r)}{k[f](r)} when the marks attached to different points are independent, namely 1 } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the mark correlation function \eqn{k_f(r)}{k[f](r)} obtained by the edge corrections named. } \details{ By default, this command calculates an estimate of Stoyan's mark correlation \eqn{k_{mm}(r)}{k[mm](r)} for the point pattern. Alternatively if the argument \code{f} or \code{f1} is given, then it calculates Stoyan's generalised mark correlation \eqn{k_f(r)}{k[f](r)} with test function \eqn{f}. Theoretical definitions are as follows (see Stoyan and Stoyan (1994, p. 262)): \itemize{ \item For a point process \eqn{X} with numeric marks, Stoyan's mark correlation function \eqn{k_{mm}(r)}{k[mm](r)}, is \deqn{ k_{mm}(r) = \frac{E_{0u}[M(0) M(u)]}{E[M,M']} }{ k[mm](r) = E[0u](M(0) * M(u))/E(M * M') } where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation given that there are points of the process at the locations \eqn{0} and \eqn{u} separated by a distance \eqn{r}, and where \eqn{M(0),M(u)} denote the marks attached to these two points. On the denominator, \eqn{M,M'} are random marks drawn independently from the marginal distribution of marks, and \eqn{E} is the usual expectation. \item For a multitype point process \eqn{X}, the mark correlation is \deqn{ k_{mm}(r) = \frac{P_{0u}[M(0) M(u)]}{P[M = M']} }{ k[mm](r) = P[0u](M(0) = M(u))/P(M = M') } where \eqn{P} and \eqn{P_{0u}}{P[0u]} denote the probability and conditional probability. \item The \emph{generalised} mark correlation function \eqn{k_f(r)}{k[f](r)} of a marked point process \eqn{X}, with test function \eqn{f}, is \deqn{ k_f(r) = \frac{E_{0u}[f(M(0),M(u))]}{E[f(M,M')]} }{ k[f](r) = E[0u](f(M(0),M(u))]/E(f(M,M')) } } The test function \eqn{f} is any function \eqn{f(m_1,m_2)}{f(m1,m2)} with two arguments which are possible marks of the pattern, and which returns a nonnegative real value. Common choices of \eqn{f} are: for continuous nonnegative real-valued marks, \deqn{f(m_1,m_2) = m_1 m_2}{f(m1,m2)= m1 * m2} for discrete marks (multitype point patterns), \deqn{f(m_1,m_2) = 1(m_1 = m_2)}{f(m1,m2)= (m1 == m2)} and for marks taking values in \eqn{[0,2\pi)}{[0,2 * pi)}, \deqn{f(m_1,m_2) = \sin(m_1 - m_2)}{f(m1,m2) = sin(m1-m2)}. Note that \eqn{k_f(r)}{k[f](r)} is not a ``correlation'' in the usual statistical sense. It can take any nonnegative real value. The value 1 suggests ``lack of correlation'': if the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{k_f(r) \equiv 1}{k[f](r) = 1}. The interpretation of values larger or smaller than 1 depends on the choice of function \eqn{f}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. It must be a marked point pattern. The argument \code{f} determines the function to be applied to pairs of marks. It has a sensible default, which depends on the kind of marks in \code{X}. If the marks are numeric values, then \code{f <- function(m1, m2) { m1 * m2}} computes the product of two marks. If the marks are a factor (i.e. 
if \code{X} is a multitype point pattern) then \code{f <- function(m1, m2) { m1 == m2}} yields the value 1 when the two marks are equal, and 0 when they are unequal. These are the conventional definitions for numerical marks and multitype points respectively. The argument \code{f} may be specified by the user. It must be an \R function, accepting two arguments \code{m1} and \code{m2} which are vectors of equal length containing mark values (of the same type as the marks of \code{X}). (It may also take additional arguments, passed through \code{fargs}). It must return a vector of numeric values of the same length as \code{m1} and \code{m2}. The values must be non-negative, and \code{NA} values are not permitted. Alternatively the user may specify the argument \code{f1} instead of \code{f}. This indicates that the test function \eqn{f} should take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)} where \eqn{f_1(u)}{f1(u)} is given by the argument \code{f1}. The argument \code{f1} should be an \R function with at least one argument. (It may also take additional arguments, passed through \code{fargs}). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{k_f(r)}{k[f](r)} is estimated. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented only for rectangular and polygonal windows (not for binary masks). } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. } } Note that the estimator assumes the process is stationary (spatially homogeneous). The numerator and denominator of the mark correlation function (in the expression above) are estimated using density estimation techniques. The user can choose between \describe{ \item{\code{"density"}}{ which uses the standard kernel density estimation routine \code{\link{density}}, and works only for evenly-spaced \code{r} values; } \item{\code{"loess"}}{ which uses the function \code{loess} in the package \pkg{modreg}; } \item{\code{"sm"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is extremely slow; } \item{\code{"smrep"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is relatively fast, but may require manual control of the smoothing parameter \code{hmult}. } } If \code{normalise=FALSE} then the algorithm will compute only the numerator \deqn{ c_f(r) = E_{0u} f(M(0),M(u)) }{ c[f](r) = E[0u] f(M(0),M(u)) } of the expression for the mark correlation function. In this case, negative values of \code{f} are permitted. } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ Mark variogram \code{\link{markvario}} for numeric marks. Mark connection function \code{\link{markconnect}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}} for factor-valued marks. Mark cross-correlation function \code{\link{markcrosscorr}} for point patterns with several columns of marks. 
\code{\link{Kmark}} to estimate a cumulative function related to the mark correlation function. } \examples{ # CONTINUOUS-VALUED MARKS: # (1) Spruces # marks represent tree diameter # mark correlation function ms <- markcorr(spruces) plot(ms) # (2) simulated data with independent marks \donttest{ X <- rpoispp(100) X <- X \%mark\% runif(npoints(X)) Xc <- markcorr(X) plot(Xc) } # MULTITYPE DATA: # Hughes' amacrine data # Cells marked as 'on'/'off' X <- if(interactive()) amacrine else amacrine[c(FALSE, TRUE)] # (3) Kernel density estimate with Epanecnikov kernel # (as proposed by Stoyan & Stoyan) M <- markcorr(X, function(m1,m2) {m1==m2}, correction="translate", method="density", kernel="epanechnikov") # Note: kernel="epanechnikov" comes from help(density) # (4) Same again with explicit control over bandwidth \donttest{ M <- markcorr(X, correction="translate", method="density", kernel="epanechnikov", bw=0.02) # see help(density) for correct interpretation of 'bw' } \testonly{ niets <- markcorr(X, function(m1,m2){m1 == m2}, method="loess") if(require(sm)) niets <- markcorr(X, correction="isotropic", method="smrep", hmult=2) } # weighted mark correlation X <- if(interactive()) betacells else betacells[c(TRUE,FALSE)] Y <- subset(X, select=type) a <- marks(X)$area v <- markcorr(Y, weights=a) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/methods.ssf.Rd0000644000176200001440000000662614611073324017104 0ustar liggesusers\name{methods.ssf} \alias{methods.ssf} %DoNotExport \alias{marks.ssf} \alias{marks<-.ssf} \alias{unmark.ssf} \alias{as.im.ssf} \alias{as.function.ssf} \alias{as.ppp.ssf} \alias{print.ssf} \alias{summary.ssf} \alias{range.ssf} \alias{min.ssf} \alias{max.ssf} \alias{integral.ssf} \title{Methods for Spatially Sampled Functions} \description{ Methods for various generic commands, for the class \code{"ssf"} of spatially sampled functions. } \usage{ \method{marks}{ssf}(x, \dots) \method{marks}{ssf}(x, \dots) <- value \method{unmark}{ssf}(X) \method{as.im}{ssf}(X, \dots) \method{as.function}{ssf}(x, \dots) \method{as.ppp}{ssf}(X, \dots) \method{print}{ssf}(x, \dots, brief=FALSE) \method{summary}{ssf}(object, \dots) \method{range}{ssf}(x, \dots) \method{min}{ssf}(x, \dots) \method{max}{ssf}(x, \dots) \method{integral}{ssf}(f, domain=NULL, ..., weights=attr(f, "weights")) } \arguments{ \item{x,X,f,object}{ A spatially sampled function (object of class \code{"ssf"}). } \item{\dots}{Arguments passed to the default method.} \item{brief}{Logical value controlling the amount of detail printed.} \item{value}{Matrix of replacement values for the function.} \item{domain}{Optional. Domain of integration. An object of class\code{"owin"} or \code{"tess"}. } \item{weights}{ Optional. Numeric vector of \emph{quadrature weights} associated with the sample points. } } \value{ \code{marks} returns a matrix. \code{marks(x) <- value} returns an object of class \code{"ssf"}. \code{as.owin} returns a window (object of class \code{"owin"}). \code{as.ppp} and \code{unmark} return a point pattern (object of class \code{"ppp"}). \code{as.function} returns a \code{function(x,y)} of class \code{"funxy"}. \code{print} returns \code{NULL}. \code{summary} returns an object of class \code{"summary.ssf"} which has a print method. \code{range} returns a numeric vector of length 2. \code{min} and \code{max} return a single numeric value. \code{integral} returns a numeric or complex value, vector, or matrix. 
\code{integral(f)} returns a numeric or complex value (if \code{f} had numeric or complex values) or a numeric vector (if \code{f} had vector values). If \code{domain} is a tessellation then \code{integral(f, domain)} returns a numeric or complex vector with one entry for each tile (if \code{f} had numeric or complex values) or a numeric matrix with one row for each tile (if \code{f} had vector values). } \details{ An object of class \code{"ssf"} represents a function (real- or vector-valued) that has been sampled at a finite set of points. The commands documented here are methods for this class, for the generic commands \code{\link[spatstat.geom]{marks}}, \code{\link[spatstat.geom]{marks<-}}, \code{\link[spatstat.geom]{unmark}}, \code{\link[spatstat.geom]{as.im}}, \code{\link[base]{as.function}}, \code{\link[spatstat.geom]{as.ppp}}, \code{\link[base]{print}}, \code{\link[base]{summary}}, \code{\link[base]{range}}, \code{\link[base]{min}}, \code{\link[base]{max}} and \code{\link[spatstat.univar]{integral}}. } \seealso{ \code{\link{ssf}} } \examples{ g <- distfun(cells[1:4]) X <- rsyst(Window(cells), 10) f <- ssf(X, g(X)) f summary(f) marks(f) as.ppp(f) as.im(f) integral(f) integral(f, quadrats(Window(f), 3)) } \author{Adrian Baddeley} \keyword{spatial} \keyword{methods} spatstat.explore/man/domain.quadrattest.Rd0000644000176200001440000000313514643125461020453 0ustar liggesusers\name{domain.quadrattest} \alias{domain.quadrattest} \title{ Extract the Domain of any Spatial Object } \description{ Given a spatial object such as a point pattern, in any number of dimensions, this function extracts the spatial domain in which the object is defined. } \usage{ \method{domain}{quadrattest}(X, \dots) } \arguments{ \item{X}{ A spatial object such as a point pattern (in any number of dimensions), line segment pattern or pixel image. } \item{\dots}{ Extra arguments. They are ignored by all the methods listed here. } } \details{ The function \code{\link[spatstat.geom]{domain}} is generic. For a spatial object \code{X} in any number of dimensions, \code{domain(X)} extracts the spatial domain in which \code{X} is defined. For a two-dimensional object \code{X}, typically \code{domain(X)} is the same as \code{Window(X)}. Exceptions occur for methods related to linear networks. } \value{ A spatial object representing the domain of \code{X}. Typically a window (object of class \code{"owin"}), a three-dimensional box (\code{"box3"}), a multidimensional box (\code{"boxx"}) or a linear network (\code{"linnet"}). } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.geom]{domain}}, \code{\link[spatstat.geom]{domain.quadratcount}}, \code{\link[spatstat.model]{domain.ppm}}, \code{\link[spatstat.random]{domain.rmhmodel}}, \code{\link[spatstat.linnet]{domain.lpp}}. \code{\link[spatstat.geom]{Window}}, \code{\link[spatstat.geom]{Frame}}. } \examples{ domain(quadrat.test(redwood, 2, 2)) } \keyword{spatial} \keyword{manip} spatstat.explore/man/compatible.fv.Rd0000644000176200001440000000265614643125461017404 0ustar liggesusers\name{compatible.fv} \alias{compatible.fv} \title{Test Whether Function Objects Are Compatible} \description{ Tests whether two or more function objects (class \code{"fv"}) are compatible. 
} \usage{ \method{compatible}{fv}(A, B, \dots, samenames=TRUE) } \arguments{ \item{A,B,\dots}{Two or more function value objects (class \code{"fv"}).} \item{samenames}{ Logical value indicating whether to check for complete agreement between the column names of the objects (\code{samenames=TRUE}, the default) or just to check that the name of the function argument is the same (\code{samenames=FALSE}). } } \details{ An object of class \code{"fv"} is essentially a data frame containing several different statistical estimates of the same function. Such objects are returned by \code{\link[spatstat.explore]{Kest}} and its relatives. This command tests whether such objects are compatible (so that, for example, they could be added or subtracted). It is a method for the generic command \code{\link[spatstat.geom]{compatible}}. The functions are compatible if they have been evaluated at the same sequence of values of the argument \code{r}, and if the statistical estimates have the same names. } \value{ Logical value: \code{TRUE} if the objects are compatible, and \code{FALSE} if they are not. } \seealso{ \code{\link{eval.fv}} } \author{\adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat.explore/man/pcf3est.Rd0000644000176200001440000001100614643125461016207 0ustar liggesusers\name{pcf3est} \Rdversion{1.1} \alias{pcf3est} \title{ Pair Correlation Function of a Three-Dimensional Point Pattern } \description{ Estimates the pair correlation function from a three-dimensional point pattern. } \usage{ pcf3est(X, \dots, rmax = NULL, nrval = 128, correction = c("translation", "isotropic"), delta=NULL, adjust=1, biascorrect=TRUE) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{g_3(r)}{g3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{g_3(r)}{g3(r)} will be estimated. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } \item{delta}{ Optional. Half-width of the Epanechnikov smoothing kernel. } \item{adjust}{ Optional. Adjustment factor for the default value of \code{delta}. } \item{biascorrect}{ Logical value. Whether to correct for underestimation due to truncation of the kernel near \eqn{r=0}. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the pair correlation function is \deqn{ g_3(r) = \frac{K_3'(r)}{4\pi r^2} }{ g3(r) = K3'(r)/(4 * pi * r^2) } where \eqn{K_3'}{K3'} is the derivative of the three-dimensional \eqn{K}-function (see \code{\link{K3est}}). The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The distance between each pair of distinct points is computed. Kernel smoothing is applied to these distance values (weighted by an edge correction factor) and the result is renormalised to give the estimate of \eqn{g_3(r)}{g3(r)}. The available edge corrections are: \describe{ \item{\code{"translation"}:}{ the Ohser translation correction estimator (Ohser, 1983; Baddeley et al, 1993) } \item{\code{"isotropic"}:}{ the three-dimensional counterpart of Ripley's isotropic edge correction (Ripley, 1977; Baddeley et al, 1993). } } Kernel smoothing is performed using the Epanechnikov kernel with half-width \code{delta}. 
If \code{delta} is missing, the default is to use the rule-of-thumb \eqn{\delta = 0.26/\lambda^{1/3}}{delta = 0.26/lambda^(1/3)} where \eqn{\lambda = n/v}{lambda = n/v} is the estimated intensity, computed from the number \eqn{n} of data points and the volume \eqn{v} of the enclosing box. This default value of \code{delta} is multiplied by the factor \code{adjust}. The smoothing estimate of the pair correlation \eqn{g_3(r)}{g3(r)} is typically an underestimate when \eqn{r} is small, due to truncation of the kernel at \eqn{r=0}. If \code{biascorrect=TRUE}, the smoothed estimate is approximately adjusted for this bias. This is advisable whenever the dataset contains a sufficiently large number of points. } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. Additionally the value of \code{delta} is returned as an attribute of this object. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{ \adrian and Rana Moyeed. } \seealso{ \code{\link[spatstat.geom]{pp3}} to create a three-dimensional point pattern (object of class \code{"pp3"}). \code{\link{F3est}}, \code{\link{G3est}}, \code{\link{K3est}} for other summary functions of a three-dimensional point pattern. \code{\link{pcf}} to estimate the pair correlation function of point patterns in two dimensions or other spaces. } \examples{ X <- rpoispp3(250) Z <- pcf3est(X) Zbias <- pcf3est(X, biascorrect=FALSE) if(interactive()) { opa <- par(mfrow=c(1,2)) plot(Z, ylim.covers=c(0, 1.2)) plot(Zbias, ylim.covers=c(0, 1.2)) par(opa) } attr(Z, "delta") } \keyword{spatial} \keyword{nonparametric} \concept{Three-dimensional} spatstat.explore/man/bw.scott.Rd0000644000176200001440000000606714611073323016410 0ustar liggesusers\name{bw.scott} \alias{bw.scott} \alias{bw.scott.iso} \title{ Scott's Rule for Bandwidth Selection for Kernel Density } \description{ Use Scott's rule of thumb to determine the smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.scott(X, isotropic=FALSE, d=NULL) bw.scott.iso(X) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}). } \item{isotropic}{ Logical value indicating whether to compute a single bandwidth for an isotropic Gaussian kernel (\code{isotropic=TRUE}) or separate bandwidths for each coordinate axis (\code{isotropic=FALSE}, the default). } \item{d}{ Advanced use only. An integer value that should be used in Scott's formula instead of the true number of spatial dimensions. } } \details{ These functions select a bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}} or other appropriate functions. They can be applied to a point pattern belonging to any class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}. The bandwidth \eqn{\sigma}{\sigma} is computed by the rule of thumb of Scott (1992, page 152, equation 6.42). 
The bandwidth is proportional to \eqn{n^{-1/(d+4)}}{n^(-1/(d+4))} where \eqn{n} is the number of points and \eqn{d} is the number of spatial dimensions. This rule is very fast to compute. It typically produces a larger bandwidth than \code{\link{bw.diggle}}. It is useful for estimating gradual trend. If \code{isotropic=FALSE} (the default), \code{bw.scott} provides a separate bandwidth for each coordinate axis, and the result of the function is a vector, of length equal to the number of coordinates. If \code{isotropic=TRUE}, a single bandwidth value is computed and the result is a single numeric value. \code{bw.scott.iso(X)} is equivalent to \code{bw.scott(X, isotropic=TRUE)}. The default value of \eqn{d} is as follows: \tabular{ll}{ \bold{class} \tab \bold{dimension} \cr \code{"ppp"} \tab 2 \cr \code{"lpp"} \tab 1 \cr \code{"pp3"} \tab 3 \cr \code{"ppx"} \tab number of spatial coordinates } The use of \code{d=1} for point patterns on a linear network (class \code{"lpp"}) was proposed by McSwiggan et al (2016) and Rakshit et al (2019). } \value{ A numerical value giving the selected bandwidth, or a numerical vector giving the selected bandwidths for each coordinate. } \seealso{ \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.ppl}}, \code{\link{bw.CvL}}, \code{\link{bw.frac}}. } \examples{ hickory <- split(lansing)[["hickory"]] b <- bw.scott(hickory) b if(interactive()) { plot(density(hickory, b)) } bw.scott.iso(hickory) bw.scott(osteo$pts[[1]]) } \references{ Scott, D.W. (1992) \emph{Multivariate Density Estimation. Theory, Practice and Visualization}. New York: Wiley. } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} \concept{Bandwidth selection} spatstat.explore/man/bw.abram.ppp.Rd0000644000176200001440000001606414627320076017141 0ustar liggesusers\name{bw.abram.ppp} \alias{bw.abram.ppp} \title{ Abramson's Adaptive Bandwidths For Spatial Point Pattern } \description{ Computes adaptive smoothing bandwidths for a spatial point pattern, according to the inverse-square-root rule of Abramson (1982). } \usage{ \method{bw.abram}{ppp}(X, h0, \dots, at=c("points", "pixels"), hp = h0, pilot = NULL, trim=5, smoother=density.ppp) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) for which the variable bandwidths should be computed. } \item{h0}{ A scalar value giving the global smoothing bandwidth in the same units as the coordinates of \code{X}. The default is \code{h0=\link{bw.ppl}(X)}. } \item{\dots}{ Additional arguments passed to \code{\link[spatstat.geom]{as.im}} to control the pixel resolution, or passed to \code{\link{density.ppp}} or \code{smoother} to control the type of smoothing, when computing the pilot estimate. } \item{at}{ Character string (partially matched) specifying whether to compute bandwidth values at the points of \code{X} (\code{at="points"}, the default) or to compute bandwidths at every pixel in a fine pixel grid (\code{at="pixels"}). } \item{hp}{ Optional. A scalar pilot bandwidth, used for estimation of the pilot density if required. Ignored if \code{pilot} is a pixel image (object of class \code{"im"}); see below. } \item{pilot}{ Optional. Specification of a pilot density (possibly unnormalised). If \code{pilot=NULL} the pilot density is computed by applying fixed-bandwidth density estimation to \code{X} using bandwidth \code{hp}. If \code{pilot} is a point pattern, the pilot density is is computed using a fixed-bandwidth estimate based on \code{pilot} and \code{hp}. 
If \code{pilot} is a pixel image (object of class \code{"im"}), this is taken to be the (possibly unnormalised) pilot density, and \code{hp} is ignored. } \item{trim}{ A trimming value required to curb excessively large bandwidths. See Details. The default is sensible in most cases. } \item{smoother}{ Smoother for the pilot. A function or character string, specifying the function to be used to compute the pilot estimate when \code{pilot} is \code{NULL} or is a point pattern. } } \details{ This function computes adaptive smoothing bandwidths using the methods of Abramson (1982) and Hall and Marron (1988). The function \code{\link[spatstat.univar]{bw.abram}} is generic. The function \code{bw.abram.ppp} documented here is the method for spatial point patterns (objects of class \code{"ppp"}). If \code{at="points"} (the default) a smoothing bandwidth is computed for each point in the pattern \code{X}. Alternatively if \code{at="pixels"} a smoothing bandwidth is computed for each spatial location in a pixel grid. Under the Abramson-Hall-Marron rule, the bandwidth at location \eqn{u} is \deqn{ h(u) = \mbox{\texttt{h0}} * \mbox{min}[ \frac{\tilde{f}(u)^{-1/2}}{\gamma}, \mbox{\texttt{trim}} ] }{ h(u) = h0 * min(\tilde{f}(u)^{-1/2}/\gamma, trim) } where \eqn{\tilde{f}(u)} is a pilot estimate of the spatially varying probability density. The variable bandwidths are rescaled by \eqn{\gamma}, the geometric mean of the \eqn{\tilde{f}(u)^{-1/2}} terms evaluated at the data; this allows the global bandwidth \code{h0} to be considered on the same scale as a corresponding fixed bandwidth. The trimming value \code{trim} has the same interpretation as the required `clipping' of the pilot density at some small nominal value (see Hall and Marron, 1988), to necessarily prevent extreme bandwidths (which can occur at very isolated observations). The pilot density or intensity is determined as follows: \itemize{ \item If \code{pilot} is a pixel image, this is taken as the pilot density or intensity. \item If \code{pilot} is \code{NULL}, then the pilot intensity is computed as a fixed-bandwidth kernel intensity estimate using \code{\link{density.ppp}} applied to the data pattern \code{X} using the pilot bandwidth \code{hp}. \item If \code{pilot} is a different point pattern on the same spatial domain as \code{X}, then the pilot intensity is computed as a fixed-bandwidth kernel intensity estimate using \code{\link{density.ppp}} applied to \code{pilot} using the pilot bandwidth \code{hp}. } In each case the pilot density or intensity is renormalised to become a probability density, and then the Abramson rule is applied. Instead of calculating the pilot as a fixed-bandwidth density estimate, the user can specify another density estimation procedure using the argument \code{smoother}. This should be either a function or the character string name of a function. It will replace \code{\link{density.ppp}} as the function used to calculate the pilot estimate. The pilot estimate will be computed as \code{smoother(X, sigma=hp, ...)} if \code{pilot} is \code{NULL}, or \code{smoother(pilot, sigma=hp, ...)} if \code{pilot} is a point pattern. If \code{smoother} does not recognise the argument name \code{sigma} for the smoothing bandwidth, then \code{hp} is effectively ignored, as shown in the Examples. 
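As a further minimal sketch of the pilot mechanism (assuming the standard \code{chorley} dataset from \pkg{spatstat.data}), a pre-computed pixel image may be supplied as the pilot, in which case \code{hp} is ignored:
\preformatted{
  ch <- split(chorley)[[1]]           # the laryngeal cancer cases
  pilot.im <- density(ch, sigma=1.5)  # fixed-bandwidth pilot intensity (a pixel image)
  h <- bw.abram(ch, h0=1, pilot=pilot.im)
}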
} \value{ Either a numeric vector of length \code{npoints(X)} giving the Abramson bandwidth for each point (when \code{at = "points"}, the default), or the entire pixel \code{\link[spatstat.geom]{im}}age of the Abramson bandwidths over the relevant spatial domain (when \code{at = "pixels"}). } \seealso{ \code{\link[spatstat.univar]{bw.abram}} } \references{ Abramson, I. (1982) On bandwidth variation in kernel estimates --- a square root law. \emph{Annals of Statistics}, \bold{10}(4), 1217-1223. Davies, T.M. and Baddeley, A. (2018) Fast computation of spatially adaptive kernel estimates. \emph{Statistics and Computing}, \bold{28}(4), 937-956. Davies, T.M., Marshall, J.C., and Hazelton, M.L. (2018) Tutorial on kernel estimation of continuous spatial and spatiotemporal relative risk. \emph{Statistics in Medicine}, \bold{37}(7), 1191-1221. Hall, P. and Marron, J.S. (1988) Variable window width kernel density estimates of probability densities. \emph{Probability Theory and Related Fields}, \bold{80}, 37-49. Silverman, B.W. (1986) \emph{Density Estimation for Statistics and Data Analysis}. Chapman and Hall, New York. } \author{ \tilman. Adapted by \adrian. } \examples{ # 'ch' just 58 laryngeal cancer cases ch <- split(chorley)[[1]] h <- bw.abram(ch,h0=1,hp=0.7) length(h) summary(h) if(interactive()) hist(h) # calculate pilot based on all 1036 observations h.pool <- bw.abram(ch,h0=1,hp=0.7,pilot=chorley) length(h.pool) summary(h.pool) if(interactive()) hist(h.pool) # get full image used for 'h' above him <- bw.abram(ch,h0=1,hp=0.7,at="pixels") plot(him);points(ch,col="grey") # use Voronoi-Dirichlet pilot ('hp' is ignored) hvo <- bw.abram(ch, h0=1, smoother=densityVoronoi) } \keyword{spatial} \keyword{nonparametric} \concept{Adaptive smoothing} \concept{Bandwidth selection} spatstat.explore/man/densityAdaptiveKernel.splitppp.Rd0000644000176200001440000000716414643125461023022 0ustar liggesusers\name{densityAdaptiveKernel.splitppp} \alias{densityAdaptiveKernel.splitppp} \alias{densityAdaptiveKernel.ppplist} \title{Adaptive Kernel Estimate of Intensity for Split Point Pattern} \description{ Computes an adaptive estimate of the intensity function (using a variable-bandwidth smoothing kernel) for each of the components of a split point pattern, or each of the point patterns in a list. } \usage{ \method{densityAdaptiveKernel}{splitppp}(X, bw=NULL, \dots, weights=NULL) \method{densityAdaptiveKernel}{ppplist}(X, bw=NULL, \dots, weights=NULL) } \arguments{ \item{X}{ Split point pattern (object of class \code{"splitppp"} created by \code{\link[spatstat.geom]{split.ppp}}) to be smoothed. Alternatively a list of point patterns, of class \code{"ppplist"}. } \item{bw}{ Smoothing bandwidths. See Details. } \item{\dots}{ Additional arguments passed to \code{\link{densityAdaptiveKernel.ppp}}. These may include arguments that will be passed to \code{\link{bw.abram.ppp}} to compute the smoothing bandwidths if \code{bw} is missing, and arguments passed to \code{\link[spatstat.geom]{as.mask}} to control the spatial resolution of the result. } \item{weights}{ Numerical weights for the points. See Details. } } \details{ This function computes a spatially-adaptive kernel estimate of the spatially-varying intensity for each of the point patterns in the list \code{X}, using \code{\link{densityAdaptiveKernel.ppp}}. The argument \code{bw} specifies smoothing bandwidths for the data points. Normally it should be a list, with the same length as \code{x}. 
The entry \code{bw[[i]]} will determine the smoothing bandwidths for the pattern \code{x[[i]]}, and may be given in any format acceptable to \code{\link{densityAdaptiveKernel.ppp}}. For example, \code{bw[[i]]} can be a numeric vector of length equal to \code{npoints(x[[i]])}, a single numeric value, a pixel image (object of class \code{"im"}), an \code{expression}, or a function of class \code{"funxy"}. For convenience, \code{bw} can also be a single \code{expression}, or a single pixel image, or a single function. If \code{bw} is missing or \code{NULL}, the default is to compute bandwidths using \code{\link{bw.abram.ppp}}. The argument \code{weights} specifies numerical case weights for the data points. Normally it should be a list, with the same length as \code{x}. The entry \code{weights[[i]]} will determine the case weights for the pattern \code{x[[i]]}, and may be given in any format acceptable to \code{\link{density.ppp}}. For example, \code{weights[[i]]} can be a numeric vector of length equal to \code{npoints(x[[i]])}, a single numeric value, a numeric matrix, a pixel image (object of class \code{"im"}), an \code{expression}, or a function of class \code{"funxy"}. For convenience, \code{weights} can also be a single \code{expression}, or a single pixel image (object of class \code{"im"}), or a single function of class \code{"funxy"}. If \code{weights} is missing or \code{NULL}, all weights are assumed to be equal to 1. } \value{ A list of pixel images (objects of class \code{"im"}) which can be plotted or printed; or a list of numeric vectors giving the values at specified points. } \author{ \adrian. } \seealso{ \code{\link{densityAdaptiveKernel.ppp}}, \code{\link{bw.abram.ppp}}. } \examples{ X <- amacrine if(!interactive()) X <- X[c(TRUE,FALSE,FALSE,FALSE)] Z <- densityAdaptiveKernel(split(X), h0=0.15) plot(Z, main="Adaptive kernel estimate") } \keyword{spatial} \keyword{methods} \keyword{smooth} \concept{Adaptive smoothing} spatstat.explore/man/densityAdaptiveKernel.ppp.Rd0000644000176200001440000001344514643125461021745 0ustar liggesusers\name{densityAdaptiveKernel.ppp} \alias{densityAdaptiveKernel.ppp} \title{Adaptive Kernel Estimate of Intensity of Point Pattern} \description{ Computes an adaptive estimate of the intensity function of a point pattern using a variable-bandwidth smoothing kernel. } \usage{ \method{densityAdaptiveKernel}{ppp}(X, bw, \dots, weights=NULL, at=c("pixels", "points"), edge=TRUE, ngroups) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{bw}{ Numeric vector of smoothing bandwidths for each point in \code{X}, or a pixel image giving the smoothing bandwidth at each spatial location, or a spatial function of class \code{"funxy"} giving the smoothing bandwidth at each location. The default is to compute bandwidths using \code{\link{bw.abram.ppp}}. } \item{\dots}{ Arguments passed to \code{\link{bw.abram.ppp}} to compute the smoothing bandwidths if \code{bw} is missing, or passed to \code{\link[spatstat.geom]{as.mask}} to control the spatial resolution of the result. } \item{weights}{ Optional vector of numeric weights for the points of \code{X}. } \item{at}{ String specifying whether to compute the intensity values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{x} (\code{at="points"}). } \item{edge}{ Logical value indicating whether to perform edge correction. } \item{ngroups}{ Number of groups into which the bandwidth values should be partitioned and discretised. 
} } \details{ This function computes a spatially-adaptive kernel estimate of the spatially-varying intensity from the point pattern \code{X} using the partitioning technique of Davies and Baddeley (2018). The function \code{\link[spatstat.univar]{densityAdaptiveKernel}} is generic. This file documents the method for point patterns, \code{densityAdaptiveKernel.ppp}. The argument \code{bw} specifies the smoothing bandwidths to be applied to each of the points in \code{X}. It may be a numeric vector of bandwidth values, or a pixel image or function yielding the bandwidth values. If the points of \code{X} are \eqn{x_1,\ldots,x_n}{x[1], ..., x[n]} and the corresponding bandwidths are \eqn{\sigma_1,\ldots,\sigma_n}{\sigma[1], ..., \sigma[n]} then the adaptive kernel estimate of intensity at a location \eqn{u} is \deqn{ \hat\lambda(u) = \sum_{i=1}^n k(u, x_i, \sigma_i) }{ \lambda(u) = sum[i] e(x[i], k(u, x[i], \sigma[i]) } where \eqn{k(u, v, \sigma)} is the value at \eqn{u} of the (possibly edge-corrected) smoothing kernel with bandwidth \eqn{\sigma} induced by a data point at \eqn{v}. Exact computation of the estimate above can be time-consuming: it takes \eqn{n} times longer than fixed-bandwidth smoothing. The partitioning method of Davies and Baddeley (2018) accelerates this computation by partitioning the range of bandwidths into \code{ngroups} intervals, correspondingly subdividing the points of the pattern \code{X} into \code{ngroups} sub-patterns according to bandwidth, and applying fixed-bandwidth smoothing to each sub-pattern. The default value of \code{ngroups} is the integer part of the square root of the number of points in \code{X}, so that the computation time is only about \eqn{\sqrt{n}}{sqrt(n)} times slower than fixed-bandwidth smoothing. Any positive value of \code{ngroups} can be specified by the user. Specifying \code{ngroups=Inf} enforces exact computation of the estimate without partitioning. Specifying \code{ngroups=1} is the same as fixed-bandwidth smoothing with bandwidth \code{sigma=median(bw)}. } \section{Bandwidths and Bandwidth Selection}{ The function \code{densityAdaptiveKernel} computes one adaptive estimate of the intensity, determined by the smoothing bandwidth values \code{bw}. Typically the bandwidth values are computed by first computing a pilot estimate of the intensity, then using \code{\link{bw.abram.ppp}} to compute the vector of bandwidths according to Abramson's rule. This involves specifying a global bandwidth \code{h0}. The default bandwidths may work well in many contexts, but for optimal bandwidth selection, this calculation should be performed repeatedly with different values of \code{h0} to optimise the value of \code{h0}. This can be computationally demanding; we recommend the function \code{multiscale.density} in the \pkg{sparr} package which supports much faster bandwidth selection, using the FFT method of Davies and Baddeley (2018). } \value{ If \code{at="pixels"} (the default), the result is a pixel image. If \code{at="points"}, the result is a numeric vector with one entry for each data point in \code{X}. } \author{ \adrian and Tilman Davies. } \references{ Davies, T.M. and Baddeley, A. (2018) Fast computation of spatially adaptive kernel estimates. \emph{Statistics and Computing}, \bold{28}(4), 937-956. Hall, P. and Marron, J.S. (1988) Variable window width kernel density estimates of probability densities. \emph{Probability Theory and Related Fields}, \bold{80}, 37-49. Silverman, B.W. 
(1986) \emph{Density Estimation for Statistics and Data Analysis}. Chapman and Hall, New York. } \seealso{ \code{\link{bw.abram.ppp}}, \code{\link{density.ppp}}, \code{\link{adaptive.density}}, \code{\link{densityVoronoi}}, \code{\link[spatstat.geom]{im.object}}. See the function \code{bivariate.density} in the \pkg{sparr} package for a more flexible implementation, and \code{multiscale.density} for an implementation that is more efficient for bandwidth selection. } \examples{ Z <- densityAdaptiveKernel(redwood, h0=0.1) plot(Z, main="Adaptive kernel estimate") points(redwood, col="white") } \keyword{spatial} \keyword{methods} \keyword{smooth} \concept{Adaptive smoothing} spatstat.explore/man/bw.optim.object.Rd0000644000176200001440000000424414611073323017644 0ustar liggesusers\name{bw.optim.object} \alias{bw.optim.object} %DoNotExport \title{Class of Optimized Bandwidths} \description{ An object of the class \code{"bw.optim"} represents a tuning parameter (usually a smoothing bandwidth) that has been selected automatically. The object can be used as if it were a numerical value, but it can also be plotted to show the optimality criterion. } \details{ An object of the class \code{"bw.optim"} represents the numerical value of a smoothing bandwidth, a threshold, or a similar tuning parameter, that has been selected by optimising a criterion such as cross-validation. The object is a numerical value, with some attributes that retain information about how the value was selected. Attributes include the vector of candidate values that were examined, the corresponding values of the optimality criterion, the name of the parameter, the name of the optimality criterion, and the units in which the parameter is measured. There are methods for \code{print}, \code{plot}, \code{summary}, \code{\link{as.data.frame}} and \code{\link{as.fv}} for the class \code{"bw.optim"}. The \code{print} method simply prints the numerical value of the parameter. The \code{summary} method prints this value, and states how this value was selected. The \code{plot} method produces a plot of the optimisation criterion against the candidate value of the parameter. The \code{as.data.frame} and \code{as.fv} methods extract this graphical information as a data frame or function table, respectively. } \seealso{ Functions which produce objects of class \code{bw.optim} include \code{\link{bw.CvL}}, \code{\link{bw.CvL.adaptive}}, \code{\link{bw.diggle}}, \code{\link[spatstat.linnet]{bw.lppl}}, \code{\link{bw.pcf}}, \code{\link{bw.ppl}}, \code{\link{bw.relrisk}}, \code{\link[spatstat.linnet]{bw.relrisk.lpp}}, \code{\link{bw.smoothppp}} and \code{\link[spatstat.linnet]{bw.voronoi}} } \examples{ Ns <- if(interactive()) 32 else 3 b <- bw.ppl(redwood, srange=c(0.02, 0.07), ns=Ns) b summary(b) plot(b) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{attribute} \concept{Bandwidth selection} spatstat.explore/man/localKdot.Rd0000644000176200001440000001144214611073324016553 0ustar liggesusers\name{localKdot} \alias{localKdot} \alias{localLdot} \title{Local Multitype K Function (Dot-Type)} \description{ for a multitype point pattern, computes the dot-type version of the local K function. } \usage{ localKdot(X, from, \dots, rmax = NULL, correction = "Ripley", verbose = TRUE, rvalue=NULL) localLdot(X, from, \dots, rmax = NULL, correction = "Ripley") } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} with marks which are a factor). 
} \item{\dots}{ Further arguments passed from \code{localLdot} to \code{localKdot}. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{from}{ Type of points from which distances should be measured. A single value; one of the possible levels of \code{marks(X)}, or an integer indicating which level. } \item{correction}{ String specifying the edge correction to be applied. Options are \code{"none"}, \code{"translate"}, \code{"translation"}, \code{"Ripley"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{verbose}{ Logical flag indicating whether to print progress reports during the calculation. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the function L or K should be computed. } } \details{ Given a multitype spatial point pattern \code{X}, the local dot-type \eqn{K} function \code{localKdot} is the local version of the multitype \eqn{K} function \code{\link{Kdot}}. Recall that \code{Kdot(X, from)} is a sum of contributions from all pairs of points in \code{X} where the first point belongs to \code{from}. The \emph{local} dot-type \eqn{K} function is defined for each point \code{X[i]} that belongs to type \code{from}, and it consists of all the contributions to the dot-type \eqn{K} function that originate from point \code{X[i]}: \deqn{ K_{i,from,to}(r) = \sqrt{\frac a {(n-1) \pi} \sum_j e_{ij}} }{ K[i,from,to](r) = sqrt( (a/((n-1)* pi)) * sum[j] e[i,j]) } where the sum is over all points \eqn{j \neq i}{j != i} that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{a} is the area of the observation window, \eqn{n} is the number of points in \code{X}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The value of \eqn{K_{i,from}(r)}{K[i,from](r)} can also be interpreted as one of the summands that contributes to the global estimate of the \code{\link{Kdot}} function. By default, the function \eqn{K_{i,from}(r)}{K[i,from](r)} is computed for a range of \eqn{r} values for each point \eqn{i} belonging to type \code{from}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X} belonging to type \code{from}. Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X} belonging to type \code{from}. The local dot-type \eqn{L} function \code{localLdot} is computed by applying the transformation \eqn{L(r) = \sqrt{K(r)/(2\pi)}}{L(r) = sqrt(K(r)/(2*pi))}. } \value{ If \code{rvalue} is given, the result is a numeric vector of length equal to the number of points in the point pattern that belong to type \code{from}. If \code{rvalue} is absent, the result is an object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point of type \code{from}. 
The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{Kdot}}, \code{\link{Ldot}}, \code{\link{localK}}, \code{\link{localL}}. } \examples{ X <- amacrine # compute all the local Ldot functions L <- localLdot(X) # plot all the local Ldot functions against r plot(L, main="local Ldot functions for amacrine", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) # compute the values of L(r) for r = 0.1 metres L12 <- localLdot(X, rvalue=0.1) } \author{ \ege and \adrian. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/segregation.test.Rd0000644000176200001440000000576214643125462020142 0ustar liggesusers\name{segregation.test} \alias{segregation.test} \alias{segregation.test.ppp} \title{ Test of Spatial Segregation of Types } \description{ Performs a Monte Carlo test of spatial segregation of the types in a multitype point pattern. } \usage{ segregation.test(X, \dots) \method{segregation.test}{ppp}(X, \dots, nsim = 19, permute = TRUE, verbose = TRUE, Xname) } \arguments{ \item{X}{ Multitype point pattern (object of class \code{"ppp"} with factor-valued marks). } \item{\dots}{ Additional arguments passed to \code{\link{relrisk.ppp}} to control the smoothing parameter or bandwidth selection. } \item{nsim}{ Number of simulations for the Monte Carlo test. } \item{permute}{ Argument passed to \code{\link[spatstat.random]{rlabel}}. If \code{TRUE} (the default), randomisation is performed by randomly permuting the labels of \code{X}. If \code{FALSE}, randomisation is performing by resampling the labels with replacement. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{Xname}{ Optional character string giving the name of the dataset \code{X}. } } \details{ The Monte Carlo test of spatial segregation of types, proposed by Kelsall and Diggle (1995) and Diggle et al (2005), is applied to the point pattern \code{X}. The test statistic is \deqn{ T = \sum_i \sum_m \left( \widehat p(m \mid x_i) - \overline p_m \right)^2 }{ T = sum[i] sum[m] (phat(m | x[i]) - pbar[m])^2 } where \eqn{\widehat p(m \mid x_i)}{phat(m | x[i])} is the leave-one-out kernel smoothing estimate of the probability that the \eqn{i}-th data point has type \eqn{m}, and \eqn{\overline p_m}{pbar[m]} is the average fraction of data points which are of type \eqn{m}. The statistic \eqn{T} is evaluated for the data and for \code{nsim} randomised versions of \code{X}, generated by randomly permuting or resampling the marks. Note that, by default, automatic bandwidth selection will be performed separately for each randomised pattern. This computation can be very time-consuming but is necessary for the test to be valid in most conditions. A short-cut is to specify the value of the smoothing bandwidth \code{sigma} as shown in the examples. } \value{ An object of class \code{"htest"} representing the result of the test. } \references{ Bithell, J.F. (1991) Estimation of relative risk functions. \emph{Statistics in Medicine} \bold{10}, 1745--1751. Kelsall, J.E. and Diggle, P.J. (1995) Kernel estimation of relative risk. \emph{Bernoulli} \bold{1}, 3--16. Diggle, P.J., Zheng, P. and Durr, P. (2005) Non-parametric estimation of spatial segregation in a multivariate point process: bovine tuberculosis in Cornwall, UK. \emph{Applied Statistics} \bold{54}, 645--658. } \seealso{ \code{\link{relrisk}} } \examples{ segregation.test(hyytiala, 5) if(interactive()) segregation.test(hyytiala, hmin=0.05) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{htest} spatstat.explore/man/rotmean.Rd0000644000176200001440000000537614643125462016323 0ustar liggesusers\name{rotmean} \alias{rotmean} \title{ Rotational Average of a Pixel Image } \description{ Compute the average pixel value over all rotations of the image about the origin, as a function of distance from the origin. } \usage{ rotmean(X, ..., origin, padzero=TRUE, Xname, result=c("fv", "im"), adjust=1) } \arguments{ \item{X}{ A pixel image. } \item{\dots}{ Ignored. } \item{origin}{ Optional. Origin about which the rotations should be performed. Either a numeric vector or a character string as described in the help for \code{\link[spatstat.geom]{shift.owin}}. } \item{padzero}{ Logical. If \code{TRUE} (the default), the value of \code{X} is assumed to be zero outside the window of \code{X}. If \code{FALSE}, the value of \code{X} is taken to be undefined outside the window of \code{X}. } \item{Xname}{ Optional name for \code{X} to be used in the function labels. } \item{result}{ Character string specifying the kind of result required: either a function object or a pixel image. } \item{adjust}{ Adjustment factor for bandwidth used in kernel smoothing. } } \details{ This command computes, for each possible distance \eqn{r}, the average pixel value of the pixels lying at distance \eqn{r} from the origin. Kernel smoothing is used to obtain a smooth function of \eqn{r}. If \code{result="fv"} (the default) the result is a function object of class \code{"fv"} giving the mean pixel value of \code{X} as a function of distance from the origin. If \code{result="im"} the result is a pixel image, with the same dimensions as \code{X}, giving the mean value of \code{X} over all pixels lying at the same distance from the origin as the current pixel. If \code{padzero=TRUE} (the default), the value of \code{X} is assumed to be zero outside the window of \code{X}. The rotational mean at a given distance \eqn{r} is the average value of the image \code{X} over the \emph{entire} circle of radius \eqn{r}, including zero values outside the window if the circle lies partly outside the window. If \code{padzero=FALSE}, the value of \code{X} is taken to be undefined outside the window of \code{X}. The rotational mean is the average of the \code{X} values over the \emph{subset} of the circle of radius \eqn{r} that lies entirely inside the window. } \value{ An object of class \code{"fv"} or \code{"im"}, with the same coordinate units as \code{X}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{radcumint}} } \examples{ online <- interactive() resolution <- if(online) 128 else 32 Z <- setcov(square(1), dimyx=resolution) f <- rotmean(Z) if(online) { plot(rotmean(Z)) plot(rotmean(Z, result="im")) } } \keyword{spatial} \keyword{math} spatstat.explore/man/harmonise.fv.Rd0000644000176200001440000000560314643125461017245 0ustar liggesusers\name{harmonise.fv} \alias{harmonise.fv} \alias{harmonize.fv} \title{Make Function Tables Compatible} \description{ Convert several objects of class \code{"fv"} to the same values of the function argument. } \usage{ \method{harmonise}{fv}(\dots, strict=FALSE) \method{harmonize}{fv}(\dots, strict=FALSE) } \arguments{ \item{\dots}{ Any number of function tables (objects of class \code{"fv"}). } \item{strict}{ Logical. If \code{TRUE}, a column of data will be deleted if columns of the same name do not appear in every object. 
} } \details{ A function value table (object of class \code{"fv"}) is essentially a data frame giving the values of a function \eqn{f(x)} (or several alternative estimates of this value) at equally-spaced values of the function argument \eqn{x}. The command \code{\link[spatstat.geom]{harmonise}} is generic. This is the method for objects of class \code{"fv"}. This command makes any number of \code{"fv"} objects compatible, in the loose sense that they have the same sequence of values of \eqn{x}. They can then be combined by \code{\link{cbind.fv}}, but not necessarily by \code{\link{eval.fv}}. All arguments \code{\dots} must be function value tables (objects of class \code{"fv"}). The result will be a list, of length equal to the number of arguments \code{\dots}, containing new versions of each of these functions, converted to a common sequence of \eqn{x} values. If the arguments were named (\code{name=value}) then the return value also carries these names. The range of \eqn{x} values in the resulting functions will be the intersection of the ranges of \eqn{x} values in the original functions. The spacing of \eqn{x} values in the resulting functions will be the finest (narrowest) of the spacings of the \eqn{x} values in the original functions. Function values are interpolated using \code{\link[stats]{approxfun}}. If \code{strict=TRUE}, each column of data will be retained only if a column of the same name appears in all of the arguments \code{\dots}. This ensures that the resulting objects are strictly compatible in the sense of \code{\link{compatible.fv}}, and can be combined using \code{\link{eval.fv}} or \code{\link{collapse.fv}}. If \code{strict=FALSE} (the default), this does not occur, and then the resulting objects are \bold{not} guaranteed to be compatible in the sense of \code{\link{compatible.fv}}. } \value{ A list, of length equal to the number of arguments \code{\dots}, whose entries are objects of class \code{"fv"}. If the arguments were named (\code{name=value}) then the return value also carries these names. } \author{\spatstatAuthors.} \examples{ H <- harmonise(K=Kest(cells), G=Gest(cells)) H } \seealso{ \code{\link{fv.object}}, \code{\link{cbind.fv}}, \code{\link{eval.fv}}, \code{\link{compatible.fv}} } \keyword{spatial} \keyword{manip} spatstat.explore/man/as.owin.quadrattest.Rd0000644000176200001440000001477714643125461020600 0ustar liggesusers\name{as.owin.quadrattest} \alias{as.owin.quadrattest} \title{Convert Data To Class owin} \description{ Converts data specifying an observation window in any of several formats, into an object of class \code{"owin"}. } \usage{ \method{as.owin}{quadrattest}(W, \dots, fatal=TRUE) } \arguments{ \item{W}{ Data specifying an observation window, in any of several formats described under \emph{Details} below. } \item{fatal}{ Logical value determining what to do if the data cannot be converted to an observation window. See Details. } \item{\dots}{Ignored.} } \value{ An object of class \code{"owin"} (see \code{\link[spatstat.geom]{owin.object}}) specifying an observation window. } \details{ The class \code{"owin"} is a way of specifying the observation window for a point pattern. See \code{\link[spatstat.geom]{owin.object}} for an overview. The generic function \code{\link[spatstat.geom]{as.owin}} converts data in any of several formats into an object of class \code{"owin"} for use by the \pkg{spatstat} package. The function \code{\link[spatstat.geom]{as.owin}} is generic, with methods for different classes of objects, and a default method. 
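  For instance, two of the rectangle formats listed below can be illustrated by the
  following minimal sketch (the coordinate values are arbitrary and chosen only for
  illustration):
\preformatted{
  # numeric vector of length 4, read as (xmin, xmax, ymin, ymax)
  R1 <- as.owin(c(0, 2, 0, 1))
  # a structure with entries xrange, yrange
  R2 <- as.owin(list(xrange=c(0, 2), yrange=c(0, 1)))
}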
The argument \code{W} may be \itemize{ \item an object of class \code{"owin"} \item a structure with entries \code{xrange}, \code{yrange} specifying the \eqn{x} and \eqn{y} dimensions of a rectangle \item a structure with entries named \code{xmin}, \code{xmax}, \code{ymin}, \code{ymax} (in any order) specifying the \eqn{x} and \eqn{y} dimensions of a rectangle. This will accept objects of class \code{bbox} in the \code{sf} package. \item a numeric vector of length 4 (interpreted as \code{(xmin, xmax, ymin, ymax)} in that order) specifying the \eqn{x} and \eqn{y} dimensions of a rectangle \item a structure with entries named \code{xl}, \code{xu}, \code{yl}, \code{yu} (in any order) specifying the \eqn{x} and \eqn{y} dimensions of a rectangle as \code{(xmin, xmax) = (xl, xu)} and \code{(ymin, ymax) = (yl, yu)}. This will accept objects of class \code{spp} used in the Venables and Ripley \pkg{spatial} package. \item an object of class \code{"ppp"} representing a point pattern. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"psp"} representing a line segment pattern. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"tess"} representing a tessellation. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"quad"} representing a quadrature scheme. In this case, the window of the \code{data} component will be extracted. \item an object of class \code{"im"} representing a pixel image. In this case, a window of type \code{"mask"} will be returned, with the same pixel raster coordinates as the image. An image pixel value of \code{NA}, signifying that the pixel lies outside the window, is transformed into the logical value \code{FALSE}, which is the corresponding convention for window masks. \item an object of class \code{"ppm"}, \code{"kppm"}, \code{"slrm"} or \code{"dppm"} representing a fitted point process model. In this case, if \code{from="data"} (the default), \code{as.owin} extracts the original point pattern data to which the model was fitted, and returns the observation window of this point pattern. If \code{from="covariates"} then \code{as.owin} extracts the covariate images to which the model was fitted, and returns a binary mask window that specifies the pixel locations. \item an object of class \code{"lpp"} representing a point pattern on a linear network. In this case, \code{as.owin} extracts the linear network and returns a window containing this network. \item an object of class \code{"lppm"} representing a fitted point process model on a linear network. In this case, \code{as.owin} extracts the linear network and returns a window containing this network. \item A \code{data.frame} with exactly three columns. Each row of the data frame corresponds to one pixel. Each row contains the \eqn{x} and \eqn{y} coordinates of a pixel, and a logical value indicating whether the pixel lies inside the window. \item A \code{data.frame} with exactly two columns. Each row of the data frame contains the \eqn{x} and \eqn{y} coordinates of a pixel that lies inside the window. \item an object of class \code{"distfun"}, \code{"nnfun"} or \code{"funxy"} representing a function of spatial location, defined on a spatial domain. The spatial domain of the function will be extracted. \item an object of class \code{"rmhmodel"} representing a point process model that can be simulated using \code{\link[spatstat.random]{rmh}}. 
The window (spatial domain) of the model will be extracted. The window may be \code{NULL} in some circumstances (indicating that the simulation window has not yet been determined). This is not treated as an error, because the argument \code{fatal} defaults to \code{FALSE} for this method. \item an object of class \code{"layered"} representing a list of spatial objects. See \code{\link[spatstat.geom]{layered}}. In this case, \code{as.owin} will be applied to each of the objects in the list, and the union of these windows will be returned. \item An object of another suitable class from another package. For full details, see \code{vignette('shapefiles')}. } If the argument \code{W} is not in one of these formats and cannot be converted to a window, then an error will be generated (if \code{fatal=TRUE}) or a value of \code{NULL} will be returned (if \code{fatal=FALSE}). When \code{W} is a data frame, the argument \code{step} can be used to specify the pixel grid spacing; otherwise, the spacing will be guessed from the data. } \seealso{ \code{\link[spatstat.geom]{as.owin}}, \code{\link[spatstat.random]{as.owin.rmhmodel}}, \code{\link[spatstat.linnet]{as.owin.lpp}}. \code{\link[spatstat.geom]{owin.object}}, \code{\link[spatstat.geom]{owin}}. Additional methods for \code{as.owin} may be provided by other packages outside the \pkg{spatstat} family. } \examples{ te <- quadrat.test(redwood, nx=3) as.owin(te) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat.explore/man/lohboot.Rd0000644000176200001440000002015714611073324016310 0ustar liggesusers\name{lohboot} \alias{lohboot} \title{Bootstrap Confidence Bands for Summary Function} \description{ Computes a bootstrap confidence band for a summary function of a point process. } \usage{ lohboot(X, fun=c("pcf", "Kest", "Lest", "pcfinhom", "Kinhom", "Linhom", "Kcross", "Lcross", "Kdot", "Ldot", "Kcross.inhom", "Lcross.inhom"), \dots, block=FALSE, global=FALSE, basicboot=FALSE, Vcorrection=FALSE, confidence=0.95, nx = 4, ny = nx, nsim=200, type=7) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{fun}{ Name of the summary function for which confidence intervals are desired: one of the strings \code{"pcf"}, \code{"Kest"}, \code{"Lest"}, \code{"pcfinhom"}, \code{"Kinhom"} \code{"Linhom"}, \code{"Kcross"}, \code{"Lcross"}, \code{"Kdot"}, \code{"Ldot"}, \code{"Kcross.inhom"} or \code{"Lcross.inhom"}. Alternatively, the function itself; it must be one of the functions listed here. } \item{\dots}{ Arguments passed to the corresponding local version of the summary function (see Details). } \item{block}{ Logical value indicating whether to use Loh's block bootstrap as originally proposed. Default is \code{FALSE} for consistency with older code. See Details. } \item{global}{ Logical. If \code{FALSE} (the default), pointwise confidence intervals are constructed. If \code{TRUE}, a global (simultaneous) confidence band is constructed. } \item{basicboot}{ Logical value indicating whether to use the so-called basic bootstrap confidence interval. See Details. } \item{Vcorrection}{ Logical value indicating whether to use a variance correction when \code{fun="Kest"} or \code{fun="Kinhom"}. See Details. } \item{confidence}{ Confidence level, as a fraction between 0 and 1. } \item{nx,ny}{ Integers. If \code{block=TRUE}, divide the window into \code{nx*ny} rectangles. } \item{nsim}{ Number of bootstrap simulations. } \item{type}{ Integer. Type of quantiles. 
Argument passed to \code{\link[stats]{quantile.default}} controlling the way the quantiles are calculated. } } \value{ A function value table (object of class \code{"fv"}) containing columns giving the estimate of the summary function, the upper and lower limits of the bootstrap confidence interval, and the theoretical value of the summary function for a Poisson process. } \details{ This algorithm computes confidence bands for the true value of the summary function \code{fun} using the bootstrap method of Loh (2008) and a modification described in Baddeley, Rubak, Turner (2015). If \code{fun="pcf"}, for example, the algorithm computes a pointwise \code{(100 * confidence)}\% confidence interval for the true value of the pair correlation function for the point process, normally estimated by \code{\link{pcf}}. It starts by computing the array of \emph{local} pair correlation functions, \code{\link{localpcf}}, of the data pattern \code{X}. This array consists of the contributions to the estimate of the pair correlation function from each data point. If \code{block=FALSE}, these contributions are resampled \code{nsim} times with replacement as described in Baddeley, Rubak, Turner (2015); from each resampled dataset the total contribution is computed, yielding \code{nsim} random pair correlation functions. If \code{block=TRUE}, the calculation is performed as originally proposed by Loh (2008, 2010). The (bounding box of the) window is divided into \eqn{nx * ny} rectangles (blocks). The average contribution of a block is obtained by averaging the contribution of each point included in the block. Then, the average contributions on each block are resampled \code{nsim} times with replacement as described in Loh (2008) and Loh (2010); from each resampled dataset the total contribution is computed, yielding \code{nsim} random pair correlation functions. Notice that for non-rectangular windows any blocks not fully contained in the window are discarded before doing the resampling, so the effective number of blocks may be substantially smaller than \eqn{nx * ny} in this case. The pointwise \code{alpha/2} and \code{1 - alpha/2} quantiles of these functions are computed, where \code{alpha = 1 - confidence}. The average of the local functions is also computed as an estimate of the pair correlation function. There are several ways to define a bootstrap confidence interval. If \code{basicbootstrap=TRUE}, the so-called basic confidence bootstrap interval is used as described in Loh (2008). It has been noticed in Loh (2010) that when the intensity of the point process is unknown, the bootstrap error estimate is larger than it should be. When the \eqn{K} function is used, an adjustment procedure has been proposed in Loh (2010) that is used if \code{Vcorrection=TRUE}. In this case, the basic confidence bootstrap interval is implicitly used. 
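  As a rough sketch of these options (the dataset and tuning values here are purely
  illustrative, not a recommendation):
\preformatted{
  # Loh's block bootstrap for the K function, dividing the window into 3 x 3 blocks
  Kci <- lohboot(redwood, Kest, block=TRUE, nx=3, ny=3)
  # the same, adding Loh's (2010) variance correction
  Kcv <- lohboot(redwood, Kest, block=TRUE, Vcorrection=TRUE)
  plot(Kci)
}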
To control the estimation algorithm, use the arguments \code{\dots}, which are passed to the local version of the summary function, as shown below: \tabular{ll}{ \bold{fun} \tab \bold{local version} \cr \code{\link{pcf}} \tab \code{\link{localpcf}} \cr \code{\link{Kest}} \tab \code{\link{localK}} \cr \code{\link{Lest}} \tab \code{\link{localL}} \cr \code{\link{pcfinhom}} \tab \code{\link{localpcfinhom}} \cr \code{\link{Kinhom}} \tab \code{\link{localKinhom}} \cr \code{\link{Linhom}} \tab \code{\link{localLinhom}} \cr \code{\link{Kcross}} \tab \code{\link{localKcross}} \cr \code{\link{Lcross}} \tab \code{\link{localLcross}} \cr \code{\link{Kdot}} \tab \code{\link{localKdot}} \cr \code{\link{Ldot}} \tab \code{\link{localLdot}} \cr \code{\link{Kcross.inhom}} \tab \code{\link{localKcross.inhom}} \cr \code{\link{Lcross.inhom}} \tab \code{\link{localLcross.inhom}} } For \code{fun="Lest"}, the calculations are first performed as if \code{fun="Kest"}, and then the square-root transformation is applied to obtain the \eqn{L}-function. Similarly for \code{fun="Linhom", "Lcross", "Ldot", "Lcross.inhom"}. Note that the confidence bands computed by \code{lohboot(fun="pcf")} may not contain the estimate of the pair correlation function computed by \code{\link{pcf}}, because of differences between the algorithm parameters (such as the choice of edge correction) in \code{\link{localpcf}} and \code{\link{pcf}}. If you are using \code{lohboot}, the appropriate point estimate of the pair correlation itself is the pointwise mean of the local estimates, which is provided in the result of \code{lohboot} and is shown in the default plot. If the confidence bands seem unbelievably narrow, this may occur because the point pattern has a hard core (the true pair correlation function is zero for certain values of distance) or because of an optical illusion when the function is steeply sloping (remember the width of the confidence bands should be measured \emph{vertically}). An alternative to \code{lohboot} is \code{\link{varblock}}. } \references{ \baddrubaturnbook Loh, J.M. (2008) A valid and fast spatial bootstrap for correlation functions. \emph{The Astrophysical Journal}, \bold{681}, 726--734. Loh, J.M. (2010) Bootstrapping an inhomogeneous point process. \emph{Journal of Statistical Planning and Inference}, \bold{140}, 734--749. } \seealso{ Summary functions \code{\link{Kest}}, \code{\link{pcf}}, \code{\link{Kinhom}}, \code{\link{pcfinhom}}, \code{\link{localK}}, \code{\link{localpcf}}, \code{\link{localKinhom}}, \code{\link{localpcfinhom}}, \code{\link{localKcross}}, \code{\link{localKdot}}, \code{\link{localLcross}}, \code{\link{localLdot}}. \code{\link{localKcross.inhom}}, \code{\link{localLcross.inhom}}. See \code{\link{varblock}} for an alternative bootstrap technique. } \examples{ p <- lohboot(simdat, stoyan=0.5) g <- lohboot(simdat, stoyan=0.5, block=TRUE) g plot(g) } \author{ \spatstatAuthors and Christophe Biscio. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/pool.quadrattest.Rd0000644000176200001440000000530514611073324020151 0ustar liggesusers\name{pool.quadrattest} \alias{pool.quadrattest} \title{ Pool Several Quadrat Tests } \description{ Pool several quadrat tests into a single quadrat test. } \usage{ \method{pool}{quadrattest}(..., df=NULL, df.est=NULL, nsim=1999, Xname=NULL, CR=NULL) } \arguments{ \item{\dots}{ Any number of objects, each of which is a quadrat test (object of class \code{"quadrattest"}). } \item{df}{ Optional. Number of degrees of freedom of the test statistic. 
Relevant only for \eqn{\chi^2}{chi^2} tests. Incompatible with \code{df.est}. } \item{df.est}{ Optional. The number of fitted parameters, or the number of degrees of freedom lost by estimation of parameters. Relevant only for \eqn{\chi^2}{chi^2} tests. Incompatible with \code{df}. } \item{nsim}{ Number of simulations, for Monte Carlo test. } \item{Xname}{ Optional. Name of the original data. } \item{CR}{ Optional. Numeric value of the Cressie-Read exponent \code{CR} overriding the value used in the tests. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"quadrattest"}. An object of class \code{"quadrattest"} represents a \eqn{\chi^2}{chi^2} test or Monte Carlo test of goodness-of-fit for a point process model, based on quadrat counts. Such objects are created by the command \code{\link{quadrat.test}}. Each of the arguments \code{\dots} must be an object of class \code{"quadrattest"}. They must all be the same type of test (chi-squared test or Monte Carlo test, conditional or unconditional) and must all have the same type of alternative hypothesis. The test statistic of the pooled test is the Pearson \eqn{X^2} statistic taken over all cells (quadrats) of all tests. The \eqn{p} value of the pooled test is then computed using either a Monte Carlo test or a \eqn{\chi^2}{chi^2} test. For a pooled \eqn{\chi^2}{chi^2} test, the number of degrees of freedom of the combined test is computed by adding the degrees of freedom of all the tests (equivalent to assuming the tests are independent) unless it is determined by the arguments \code{df} or \code{df.est}. The resulting \eqn{p} value is computed to obtain the pooled test. For a pooled Monte Carlo test, new simulations are performed to determine the pooled Monte Carlo \eqn{p} value. } \value{ Another object of class \code{"quadrattest"}. } \seealso{ \code{\link{pool}}, \code{\link{quadrat.test}} } \examples{ Y <- split(humberside) test1 <- quadrat.test(Y[[1]]) test2 <- quadrat.test(Y[[2]]) pool(test1, test2, Xname="Humberside") } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} spatstat.explore/man/pcf.ppp.Rd0000644000176200001440000002447014643125462016220 0ustar liggesusers\name{pcf.ppp} \alias{pcf.ppp} \title{Pair Correlation Function of Point Pattern} \description{ Estimates the pair correlation function of a point pattern using kernel methods. } \usage{ \method{pcf}{ppp}(X, \dots, r = NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor = c("r", "d"), var.approx = FALSE, domain=NULL, ratio=FALSE, close=NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. Either a single numeric value giving the standard deviation of the kernel, or a character string specifying a bandwidth selection rule recognised by \code{\link{density.default}}. If \code{bw} is missing or \code{NULL}, the default value is computed using Stoyan's rule of thumb: see Details. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Coefficient for Stoyan's bandwidth selection rule; see Details. } \item{correction}{ Edge correction. 
A character vector specifying the choice (or choices) of edge correction. See Details. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See Details. } \item{var.approx}{ Logical value indicating whether to compute an analytic approximation to the variance of the estimated pair correlation. } \item{domain}{ Optional. Calculations will be restricted to this subset of the window. See Details. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{close}{ Advanced use only. Precomputed data. See section on Advanced Use. } } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{the vector of values of the argument \eqn{r} at which the pair correlation function \eqn{g(r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g(r)} for the Poisson process } \item{trans}{vector of values of \eqn{g(r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g(r)} estimated by Ripley isotropic correction } \item{v}{vector of approximate values of the variance of the estimate of \eqn{g(r)} } as required. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{g(r)}. The return value also has an attribute \code{"bw"} giving the smoothing bandwidth that was used. } \details{ The pair correlation function \eqn{g(r)} is a summary of the dependence between points in a spatial point process. The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding two points at locations \eqn{x} and \eqn{y} separated by a distance \eqn{r} is equal to \deqn{ p(r) = \lambda^2 g(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda^2 * g(r) dx dy } where \eqn{\lambda}{lambda} is the intensity of the point process. For a completely random (uniform Poisson) process, \eqn{p(r) = \lambda^2 \,{\rm d}x \, {\rm d}y}{p(r) = lambda^2 dx dy} so \eqn{g(r) = 1}. Formally, the pair correlation function of a stationary point process is defined by \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. This routine computes an estimate of \eqn{g(r)} by kernel smoothing. \itemize{ \item If \code{divisor="r"} (the default), then the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. \item If \code{divisor="d"} then a modified estimator is used (Guan, 2007): the contribution from an interpoint distance \eqn{d_{ij}}{d[ij]} to the estimate of \eqn{g(r)} is divided by \eqn{d_{ij}}{d[ij]} instead of dividing by \eqn{r}. This usually improves the bias of the estimator when \eqn{r} is close to zero. 
} There is also a choice of spatial edge corrections (which are needed to avoid bias due to edge effects associated with the boundary of the spatial window): \itemize{ \item If \code{correction="translate"} or \code{correction="translation"} then the translation correction is used. For \code{divisor="r"} the translation-corrected estimate is given in equation (15.15), page 284 of Stoyan and Stoyan (1994). \item If \code{correction="Ripley"} or \code{correction="isotropic"} then Ripley's isotropic edge correction is used. For \code{divisor="r"} the isotropic-corrected estimate is given in equation (15.18), page 285 of Stoyan and Stoyan (1994). \item If \code{correction="none"} then no edge correction is used, that is, an uncorrected estimate is computed. } Multiple corrections can be selected. The default is \code{correction=c("translate", "Ripley")}. Alternatively \code{correction="all"} selects all options; \code{correction="best"} selects the option which has the best statistical performance; \code{correction="good"} selects the option which is the best compromise between statistical performance and speed of computation. The choice of smoothing kernel is controlled by the argument \code{kernel} which is passed to \code{\link{density.default}}. The default is the Epanechnikov kernel, recommended by Stoyan and Stoyan (1994, page 285). The bandwidth of the smoothing kernel can be controlled by the argument \code{bw}. Bandwidth is defined as the standard deviation of the kernel; see the documentation for \code{\link{density.default}}. For the Epanechnikov kernel with half-width \code{h}, the argument \code{bw} is equivalent to \eqn{h/\sqrt{5}}{h/sqrt(5)}. Stoyan and Stoyan (1994, page 285) recommend using the Epanechnikov kernel with support \eqn{[-h,h]} chosen by the rule of thumn \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the point process, and \eqn{c} is a constant in the range from 0.1 to 0.2. See equation (15.16). If \code{bw} is missing or \code{NULL}, then this rule of thumb will be applied. The argument \code{stoyan} determines the value of \eqn{c}. The smoothing bandwidth that was used in the calculation is returned as an attribute of the final result. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. If it is specified, \code{r} must be a vector of increasing numbers starting from \code{r[1] = 0}, and \code{max(r)} must not exceed half the diameter of the window. If the argument \code{domain} is given, estimation will be restricted to this region. That is, the estimate of \eqn{g(r)} will be based on pairs of points in which the first point lies inside \code{domain} and the second point is unrestricted. The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link[spatstat.geom]{as.owin}}. It must be a subset of the window of the point pattern \code{X}. To compute a confidence band for the true value of the pair correlation function, use \code{\link{lohboot}}. If \code{var.approx = TRUE}, the variance of the estimate of the pair correlation will also be calculated using an analytic approximation (Illian et al, 2008, page 234) which is valid for stationary point processes which are not too clustered. This calculation is not yet implemented when the argument \code{domain} is given. 
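  A brief sketch combining several of the options described above (the particular
  choices shown are illustrative only):
\preformatted{
  # modified divisor with Ripley's isotropic edge correction
  g1 <- pcf(redwood, divisor="d", correction="isotropic")
  # larger coefficient in Stoyan's bandwidth rule of thumb
  g2 <- pcf(redwood, stoyan=0.2)
  # restrict estimation to a subwindow
  g3 <- pcf(redwood, domain=square(0.5))
}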
} \section{Advanced Use}{ To perform the same computation using several different bandwidths \code{bw}, it is efficient to use the argument \code{close}. This should be the result of \code{\link[spatstat.geom]{closepairs}(X, rmax)} for a suitably large value of \code{rmax}, namely \code{rmax >= max(r) + 3 * bw}. } \references{ Guan, Y. (2007) A least-squares cross-validation bandwidth selection approach in pair correlation function estimation. \emph{Statistics and Probability Letters} \bold{77} (18) 1722--1729. Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008) \emph{Statistical Analysis and Modelling of Spatial Point Patterns.} Wiley. Stoyan, D. and Stoyan, H. (1994) \emph{Fractals, random shapes and point fields: methods of geometrical statistics.} John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{pcf}}, \code{\link{density.default}}, \code{\link{bw.stoyan}}, \code{\link{bw.pcf}}, \code{\link{lohboot}}. } \examples{ X <- simdat \testonly{ X <- X[seq(1,npoints(X), by=4)] } p <- pcf(X) plot(p, main="pair correlation function for X") # indicates inhibition at distances r < 0.3 pd <- pcf(X, divisor="d") # compare estimates plot(p, cbind(iso, theo) ~ r, col=c("blue", "red"), ylim.covers=0, main="", lwd=c(2,1), lty=c(1,3), legend=FALSE) plot(pd, iso ~ r, col="green", lwd=2, add=TRUE) legend("center", col=c("blue", "green"), lty=1, lwd=2, legend=c("divisor=r","divisor=d")) # calculate approximate variance and show POINTWISE confidence bands pv <- pcf(X, var.approx=TRUE) plot(pv, cbind(iso, iso+2*sqrt(v), iso-2*sqrt(v)) ~ r) } \author{ \spatstatAuthors and \martinH. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/bw.CvL.Rd0000644000176200001440000000621414611073323015732 0ustar liggesusers\name{bw.CvL} \alias{bw.CvL} \title{ Cronie and van Lieshout's Criterion for Bandwidth Selection for Kernel Density } \description{ Uses Cronie and van Lieshout's criterion based on Cambell's formula to select a smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.CvL(X, \dots, srange = NULL, ns = 16, sigma = NULL, warn=TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{Ignored.} \item{srange}{ Optional numeric vector of length 2 giving the range of values of bandwidth to be searched. } \item{ns}{ Optional integer giving the number of values of bandwidth to search. } \item{sigma}{ Optional. Vector of values of the bandwidth to be searched. Overrides the values of \code{ns} and \code{srange}. } \item{warn}{ Logical. If \code{TRUE}, a warning is issued if the optimal value of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. The bandwidth \eqn{\sigma}{\sigma} is chosen to minimise the discrepancy between the area of the observation window and the sum of reciprocal estimated intensity values at the points of the point process \deqn{ \mbox{CvL}(\sigma) = (|W| - \sum_i 1/\hat\lambda(x_i))^2 }{ CvL(\sigma) = (|W| - sum[i] 1/\lambda(x[i]))^2 } where the sum is taken over all the data points \eqn{x_i}{x[i]}, and where \eqn{\hat\lambda(x_i)}{\lambda(x[i])} is the kernel-smoothing estimate of the intensity at \eqn{x_i}{x[i]} with smoothing bandwidth \eqn{\sigma}{\sigma}. 
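  To make the criterion concrete, the following sketch evaluates it directly for a
  single trial bandwidth using \code{\link{density.ppp}}. This only illustrates the
  formula above; it is not claimed to reproduce the internal computation of
  \code{bw.CvL} exactly (in particular, the edge correction and leave-one-out
  settings used internally are not asserted here).
\preformatted{
  sig <- 0.05                                       # arbitrary trial bandwidth
  lam <- density(redwood, sigma=sig, at="points")   # intensity estimate at the data points
  cvl <- (area(Window(redwood)) - sum(1/lam))^2     # value of the criterion at sig
}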
The value of \eqn{\mbox{CvL}(\sigma)}{CvL(\sigma)} is computed directly, using \code{\link{density.ppp}}, for \code{ns} different values of \eqn{\sigma}{\sigma} between \code{srange[1]} and \code{srange[2]}. } \value{ A single numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} (see \code{\link[spatstat.explore]{bw.optim.object}}) which can be plotted to show the bandwidth selection criterion as a function of \code{sigma}. } \seealso{ \code{\link{density.ppp}}, \code{\link[spatstat.explore]{bw.optim.object}}. Alternative methods: \code{\link{bw.diggle}}, \code{\link{bw.scott}}, \code{\link{bw.ppl}}, \code{\link{bw.frac}}. For adaptive smoothing bandwidths, use \code{\link{bw.CvL.adaptive}}. } \examples{ if(interactive()) { b <- bw.CvL(redwood) b plot(b, main="Cronie and van Lieshout bandwidth criterion for redwoods") plot(density(redwood, b)) plot(density(redwood, bw.CvL)) } \testonly{ b <- bw.CvL(redwood, srange=c(0.03, 0.07), ns=2) } } \references{ Cronie, O and Van Lieshout, M N M (2018) A non-model-based approach to bandwidth selection for kernel estimators of spatial intensity functions, \emph{Biometrika}, \bold{105}, 455-462. } \author{ \ottmar and \colette. Adapted for \pkg{spatstat} by \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} \concept{Bandwidth selection} spatstat.explore/man/cov.im.Rd0000644000176200001440000000416614611073323016036 0ustar liggesusers\name{cov.im} \alias{cov.im} \alias{cor.im} \title{ Covariance and Correlation between Images } \description{ Compute the covariance or correlation between (the corresponding pixel values in) several images. } \usage{ cov.im(\dots, use = "everything", method = c("pearson", "kendall", "spearman")) } \arguments{ \item{\dots}{ Any number of arguments, each of which is a pixel image (object of class \code{"im"}). Alternatively, a single argument which is a list of pixel images. } \item{use}{ Argument passed to \code{\link[stats]{cov}} or \code{\link[stats]{cor}} determining how to handle \code{NA} values in the data. } \item{method}{ Argument passed to \code{\link[stats]{cov}} or \code{\link[stats]{cor}} determining the type of correlation that will be computed. } } \details{ The arguments \code{\dots} should be pixel images (objects of class \code{"im"}). Their spatial domains must overlap, but need not have the same pixel dimensions. These functions compute the covariance or correlation between the corresponding pixel values in the images given. The pixel image domains are intersected, and converted to a common pixel resolution. Then the corresponding pixel values of each image are extracted. Finally the correlation or covariance between the pixel values of each pair of images, at corresponding pixels, is computed. The result is a symmetric matrix with one row and column for each image. The \code{[i,j]} entry is the correlation or covariance between the \code{i}th and \code{j}th images in the argument list. The row names and column names of the matrix are copied from the argument names if they were given (i.e. if the arguments were given as \code{name=value}). Note that \code{\link[stats]{cor}} and \code{\link[stats]{cov}} are not generic, so you have to type \code{cor.im}, \code{cov.im}. } \value{ A symmetric matrix. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link[stats]{cor}}, \code{\link[stats]{cov}} \code{\link{pairs.im}} } \examples{ cor.im(bei.extra) } \keyword{spatial} \keyword{univar} \keyword{nonparametric} spatstat.explore/man/G3est.Rd0000644000176200001440000000747214643125461015641 0ustar liggesusers\name{G3est} \Rdversion{1.1} \alias{G3est} \title{ Nearest Neighbour Distance Distribution Function of a Three-Dimensional Point Pattern } \description{ Estimates the nearest-neighbour distance distribution function \eqn{G_3(r)}{G3(r)} from a three-dimensional point pattern. } \usage{ G3est(X, ..., rmax = NULL, nrval = 128, correction = c("rs", "km", "Hanisch")) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{G_3(r)}{G3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{G_3(r)}{G3(r)} will be estimated. A large value of \code{nrval} is required to avoid discretisation effects. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the nearest-neighbour function is \deqn{ G_3(r) = P(d^\ast(x,\Phi) \le r \mid x \in \Phi) }{ G3(r) = P(d*(x,Phi) <= r | x in Phi) } the cumulative distribution function of the distance \eqn{d^\ast(x,\Phi)}{d*(x,Phi)} from a typical point \eqn{x} in \eqn{\Phi}{Phi} to its nearest neighbour, i.e. to the nearest \emph{other} point of \eqn{\Phi}{Phi}. The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The nearest neighbour function of \eqn{\Phi}{Phi} can then be estimated using techniques described in the References. For each data point, the distance to the nearest neighbour is computed. The empirical cumulative distribution function of these values, with appropriate edge corrections, is the estimate of \eqn{G_3(r)}{G3(r)}. The available edge corrections are: \describe{ \item{\code{"rs"}:}{ the reduced sample (aka minus sampling, border correction) estimator (Baddeley et al, 1993) } \item{\code{"km"}:}{ the three-dimensional version of the Kaplan-Meier estimator (Baddeley and Gill, 1997) } \item{\code{"Hanisch"}:}{ the three-dimensional generalisation of the Hanisch estimator (Hanisch, 1984). } } Alternatively \code{correction="all"} selects all options. } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Baddeley, A.J. and Gill, R.D. (1997) Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25}, 263--292. Hanisch, K.-H. (1984) Some remarks on estimators of the distribution function of nearest neighbour distance in stationary spatial point patterns. \emph{Mathematische Operationsforschung und Statistik, series Statistics} \bold{15}, 409--412. } \author{ \adrian and Rana Moyeed. } \section{Warnings}{ A large value of \code{nrval} is required in order to avoid discretisation effects (due to the use of histograms in the calculation). 
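  For example (a minimal illustrative sketch), the sensitivity to discretisation can
  be checked by recomputing the estimate with a finer grid of \eqn{r} values:
\preformatted{
  X3 <- rpoispp3(42)
  G.coarse <- G3est(X3, nrval=64)
  G.fine   <- G3est(X3, nrval=512)
}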
} \seealso{ \code{\link[spatstat.geom]{pp3}} to create a three-dimensional point pattern (object of class \code{"pp3"}). \code{\link{F3est}}, \code{\link{K3est}}, \code{\link{pcf3est}} for other summary functions of a three-dimensional point pattern. \code{\link{Gest}} to estimate the empty space function of point patterns in two dimensions. } \examples{ X <- rpoispp3(42) Z <- G3est(X) if(interactive()) plot(Z) } \keyword{spatial} \keyword{nonparametric} \concept{Three-dimensional} spatstat.explore/man/Smooth.ppp.Rd0000644000176200001440000002243614643125462016721 0ustar liggesusers\name{Smooth.ppp} \alias{Smooth.ppp} \alias{markmean} \alias{markvar} \title{Spatial smoothing of observations at irregular points} \description{ Performs spatial smoothing of numeric values observed at a set of irregular locations. Uses kernel smoothing and least-squares cross-validated bandwidth selection. } \usage{ \method{Smooth}{ppp}(X, sigma=NULL, ..., weights = rep(1, npoints(X)), at = "pixels", leaveoneout=TRUE, adjust = 1, varcov = NULL, edge = TRUE, diggle = FALSE, kernel = "gaussian", scalekernel = is.character(kernel), se = FALSE, loctype = c("random", "fixed"), wtype = c("multiplicity", "importance"), geometric = FALSE) markmean(X, ...) markvar(X, sigma=NULL, ..., weights=NULL, varcov=NULL) } \arguments{ \item{X}{A marked point pattern (object of class \code{"ppp"}).} \item{sigma}{ Smoothing bandwidth. A single positive number, a numeric vector of length 2, or a function that selects the bandwidth automatically. See \code{\link{density.ppp}}. } \item{\dots}{ Further arguments passed to \code{\link{bw.smoothppp}} and \code{\link{density.ppp}} to control the kernel smoothing and the pixel resolution of the result. } \item{weights}{ Optional weights attached to the observations. A numeric vector, a \code{function(x,y)}, a pixel image, or an \code{expression}. See \code{\link{density.ppp}}. } \item{at}{ String specifying whether to compute the smoothed values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{leaveoneout}{ Logical value indicating whether to compute a leave-one-out estimator. Applicable only when \code{at="points"}. } \item{edge,diggle}{ Arguments passed to \code{\link{density.ppp}} to determine the edge correction. } \item{adjust}{ Optional. Adjustment factor for the bandwidth \code{sigma}. } \item{varcov}{ Variance-covariance matrix. An alternative to \code{sigma}. See \code{\link{density.ppp}}. } \item{kernel}{ The smoothing kernel. A character string specifying the smoothing kernel (current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}), or a pixel image (object of class \code{"im"}) containing values of the kernel, or a \code{function(x,y)} which yields values of the kernel. } \item{scalekernel}{ Logical value. If \code{scalekernel=TRUE}, then the kernel will be rescaled to the bandwidth determined by \code{sigma} and \code{varcov}: this is the default behaviour when \code{kernel} is a character string. If \code{scalekernel=FALSE}, then \code{sigma} and \code{varcov} will be ignored: this is the default behaviour when \code{kernel} is a function or a pixel image. } \item{se}{ Logical value specifying whether to calculate standard errors. This calculation is experimental. } \item{loctype}{ Character string (partially matched) specifying whether the point locations are assumed to be fixed or random, in the calculation of standard error. Experimental. 
} \item{wtype}{ Character string (partially matched) specifying whether the weights should be interpreted as multiplicities or as importance weights, in the calculation of standard error. Experimental. } \item{geometric}{ Logical value indicating whether to perform geometric mean smoothing instead of arithmetic mean smoothing. See Details. } } \details{ The function \code{Smooth.ppp} performs spatial smoothing of numeric values observed at a set of irregular locations. The functions \code{markmean} and \code{markvar} are wrappers for \code{Smooth.ppp} which compute the spatially-varying mean and variance of the marks of a point pattern. \code{Smooth.ppp} is a method for the generic function \code{\link{Smooth}} for the class \code{"ppp"} of point patterns. Thus you can type simply \code{Smooth(X)}. Smoothing is performed by kernel weighting, using the Gaussian kernel by default. If the observed values are \eqn{v_1,\ldots,v_n}{v[1],...,v[n]} at locations \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} respectively, then the smoothed value at a location \eqn{u} is (ignoring edge corrections) \deqn{ g(u) = \frac{\sum_i k(u-x_i) v_i}{\sum_i k(u-x_i)} }{ g(u) = (sum of k(u-x[i]) v[i])/(sum of k(u-x[i])) } where \eqn{k} is the kernel (a Gaussian kernel by default). This is known as the Nadaraya-Watson smoother (Nadaraya, 1964, 1989; Watson, 1964). By default, the smoothing kernel bandwidth is chosen by least squares cross-validation (see below). The argument \code{X} must be a marked point pattern (object of class \code{"ppp"}, see \code{\link[spatstat.geom]{ppp.object}}). The points of the pattern are taken to be the observation locations \eqn{x_i}{x[i]}, and the marks of the pattern are taken to be the numeric values \eqn{v_i}{v[i]} observed at these locations. The marks are allowed to be a data frame (in \code{Smooth.ppp} and \code{markmean}). Then the smoothing procedure is applied to each column of marks. The numerator and denominator are computed by \code{\link{density.ppp}}. The arguments \code{...} control the smoothing kernel parameters and determine whether edge correction is applied. The smoothing kernel bandwidth can be specified by either of the arguments \code{sigma} or \code{varcov} which are passed to \code{\link{density.ppp}}. If neither of these arguments is present, then by default the bandwidth is selected by least squares cross-validation, using \code{\link{bw.smoothppp}}. The optional argument \code{weights} allows numerical weights to be applied to the data. If a weight \eqn{w_i}{w[i]} is associated with location \eqn{x_i}{x[i]}, then the smoothed function is (ignoring edge corrections) \deqn{ g(u) = \frac{\sum_i k(u-x_i) v_i w_i}{\sum_i k(u-x_i) w_i} }{ g(u) = (sum of k(u-x[i]) v[i] w[i])/(sum of k(u-x[i]) w[i]) } If \code{geometric=TRUE} then geometric mean smoothing is performed instead of arithmetic mean smoothing. The mark values must be non-negative numbers. The logarithm of the mark values is computed; these logarithmic values are kernel-smoothed as described above; then the exponential function is applied to the smoothed values. An alternative to kernel smoothing is inverse-distance weighting, which is performed by \code{\link{idw}}. } \section{Very small bandwidth}{ If the chosen bandwidth \code{sigma} is very small, kernel smoothing is mathematically equivalent to nearest-neighbour interpolation; the result will be computed by \code{\link[spatstat.geom]{nnmark}}. This is unless \code{at="points"} and \code{leaveoneout=FALSE}, when the original mark values are returned. 
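  The ratio form of the estimator described in the Details can be sketched directly,
  taking the \code{longleaf} marks as the observed values and an arbitrary bandwidth.
  This mirrors, but is not claimed to reproduce exactly, the internal computation of
  \code{Smooth.ppp}:
\preformatted{
  num <- density(longleaf, sigma=10, weights=marks(longleaf))  # sum of k(u-x[i]) v[i]
  den <- density(longleaf, sigma=10)                           # sum of k(u-x[i])
  ratio <- num/den    # should be close to Smooth(longleaf, sigma=10)
}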
} \value{ \emph{If \code{X} has a single column of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a pixel image (object of class \code{"im"}). Pixel values are values of the interpolated function. \item If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{X}. Entries are values of the interpolated function at the points of \code{X}. } \emph{If \code{X} has a data frame of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a named list of pixel images (object of class \code{"im"}). There is one image for each column of marks. This list also belongs to the class \code{"solist"}, for which there is a plot method. \item If \code{at="points"}, the result is a data frame with one row for each point of \code{X}, and one column for each column of marks. Entries are values of the interpolated function at the points of \code{X}. } The return value has attributes \code{"sigma"} and \code{"varcov"} which report the smoothing bandwidth that was used. } \seealso{ \code{\link{Smooth}}, \code{\link{density.ppp}}, \code{\link{bw.smoothppp}}, \code{\link[spatstat.geom]{nnmark}}, \code{\link[spatstat.geom]{ppp.object}}, \code{\link[spatstat.geom]{im.object}}. See \code{\link{idw}} for inverse-distance weighted smoothing. To perform interpolation, see also the \code{akima} package. } \examples{ # Longleaf data - tree locations, marked by tree diameter # Local smoothing of tree diameter (automatic bandwidth selection) Z <- Smooth(longleaf) # Kernel bandwidth sigma=5 plot(Smooth(longleaf, 5)) # mark variance plot(markvar(longleaf, sigma=5)) # data frame of marks: trees marked by diameter and height plot(Smooth(finpines, sigma=2)) head(Smooth(finpines, sigma=2, at="points")) } \author{ \spatstatAuthors. } \references{ Nadaraya, E.A. (1964) On estimating regression. \emph{Theory of Probability and its Applications} \bold{9}, 141--142. Nadaraya, E.A. (1989) \emph{Nonparametric estimation of probability densities and regression curves}. Kluwer, Dordrecht. Watson, G.S. (1964) Smooth regression analysis. \emph{Sankhya A} \bold{26}, 359--372. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.explore/man/circdensity.Rd0000644000176200001440000000262714611073323017163 0ustar liggesusers\name{circdensity} \alias{circdensity} \title{ Density Estimation for Circular Data } \description{ Computes a kernel smoothed estimate of the probability density for angular data. } \usage{ circdensity(x, sigma = "nrd0", \dots, bw = NULL, weights=NULL, unit = c("degree", "radian")) } \arguments{ \item{x}{ Numeric vector, containing angular data. } \item{sigma}{ Smoothing bandwidth, or bandwidth selection rule, passed to \code{\link[stats]{density.default}}. } \item{bw}{Alternative to \code{sigma} for consistency with other functions.} \item{\dots}{ Additional arguments passed to \code{\link[stats]{density.default}}, such as \code{kernel} and \code{weights}. } \item{weights}{ Optional numeric vector of weights for the data in \code{x}. } \item{unit}{ The unit of angle in which \code{x} is expressed. } } \details{ The angular values \code{x} are smoothed using (by default) the wrapped Gaussian kernel with standard deviation \code{sigma}. } \value{ An object of class \code{"density"} (produced by \code{\link[stats]{density.default}}) which can be plotted by \code{plot} or by \code{\link{rose}}. } \author{ \spatstatAuthors. } \seealso{ \code{\link[stats]{density.default}}), \code{\link{rose}}. 
} \examples{ ang <- runif(1000, max=360) rose(circdensity(ang, 12)) } \keyword{nonparametric} \keyword{smooth} spatstat.explore/man/fv.Rd0000644000176200001440000001665414611073324015264 0ustar liggesusers\name{fv} \alias{fv} \title{ Create a Function Value Table } \description{ Advanced Use Only. This low-level function creates an object of class \code{"fv"} from raw numerical data. } \usage{ fv(x, argu = "r", ylab = NULL, valu, fmla = NULL, alim = NULL, labl = names(x), desc = NULL, unitname = NULL, fname = NULL, yexp = ylab) } \arguments{ \item{x}{ A data frame with at least 2 columns containing the values of the function argument and the corresponding values of (one or more versions of) the function. } \item{argu}{ String. The name of the column of \code{x} that contains the values of the function argument. } \item{ylab}{ Either \code{NULL}, or an \R language expression representing the mathematical name of the function. See Details. } \item{valu}{ String. The name of the column of \code{x} that should be taken as containing the function values, in cases where a single column is required. } \item{fmla}{ Either \code{NULL}, or a \code{formula} specifying the default plotting behaviour. See Details. } \item{alim}{ Optional. The default range of values of the function argument for which the function will be plotted. Numeric vector of length 2. } \item{labl}{ Optional. Plot labels for the columns of \code{x}. A vector of strings, with one entry for each column of \code{x}. } \item{desc}{ Optional. Descriptions of the columns of \code{x}. A vector of strings, with one entry for each column of \code{x}. } \item{unitname}{ Optional. Name of the unit (usually a unit of length) in which the function argument is expressed. Either a single character string, or a vector of two character strings giving the singular and plural forms, respectively. } \item{fname}{ Optional. The name of the function itself. A character string. } \item{yexp}{ Optional. Alternative form of \code{ylab} more suitable for annotating an axis of the plot. See Details. } } \details{ This documentation is provided for experienced programmers who want to modify the internal behaviour of \pkg{spatstat}. Other users please see \code{\link{fv.object}}. The low-level function \code{fv} is used to create an object of class \code{"fv"} from raw numerical data. The data frame \code{x} contains the numerical data. It should have one column (typically but not necessarily named \code{"r"}) giving the values of the function argument for which the function has been evaluated; and at least one other column, containing the corresponding values of the function. Typically there is more than one column of function values. These columns typically give the values of different versions or estimates of the same function, for example, different estimates of the \eqn{K} function obtained using different edge corrections. However they may also contain the values of related functions such as the derivative or hazard rate. \code{argu} specifies the name of the column of \code{x} that contains the values of the function argument (typically \code{argu="r"} but this is not compulsory). \code{valu} specifies the name of another column that contains the \sQuote{recommended} estimate of the function. It will be used to provide function values in those situations where a single column of data is required. For example, \code{\link{envelope}} computes its simulation envelopes using the recommended value of the summary function. 
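  A small sketch of how these settings surface in an existing object, using the
  \eqn{K} function estimator from this package (the attribute name \code{"fmla"} is
  an assumption about the internal representation, not documented behaviour):
\preformatted{
  K <- Kest(cells)
  fvnames(K, ".y")   # name of the 'valu' (recommended) column
  attr(K, "fmla")    # default plotting formula
}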
\code{fmla} specifies the default plotting behaviour. It should be a formula, or a string that can be converted to a formula. Variables in the formula are names of columns of \code{x}. See \code{\link{plot.fv}} for the interpretation of this formula. \code{alim} specifies the recommended range of the function argument. This is used in situations where statistical theory or statistical practice indicates that the computed estimates of the function are not trustworthy outside a certain range of values of the function argument. By default, \code{\link{plot.fv}} will restrict the plot to this range. \code{fname} is a string giving the name of the function itself. For example, the \eqn{K} function would have \code{fname="K"}. \code{ylab} is a mathematical expression for the function value, used when labelling an axis of the plot, or when printing a description of the function. It should be an \R language object. For example the \eqn{K} function's mathematical name \eqn{K(r)} is rendered by \code{ylab=quote(K(r))}. If \code{yexp} is present, then \code{ylab} will be used only for printing, and \code{yexp} will be used for annotating axes in a plot. (Otherwise \code{yexp} defaults to \code{ylab}). For example the cross-type \eqn{K} function \eqn{K_{1,2}(r)}{K[1,2](r)} is rendered by something like \code{ylab=quote(Kcross[1,2](r))} and \code{yexp=quote(Kcross[list(1,2)](r))} to get the most satisfactory behaviour. (A useful tip: use \code{\link{substitute}} instead of \code{\link{quote}} to insert values of variables into an expression, e.g. \code{substitute(Kcross[i,j](r), list(i=42,j=97))} yields the same as \code{quote(Kcross[42, 97](r))}.) \code{labl} is a character vector specifying plot labels for each column of \code{x}. These labels will appear on the plot axes (in non-default plots), legends and printed output. Entries in \code{labl} may contain the string \code{"\%s"} which will be replaced by \code{fname}. For example the border-corrected estimate of the \eqn{K} function has label \code{"\%s[bord](r)"} which becomes \code{"K[bord](r)"}. \code{desc} is a character vector containing intelligible explanations of each column of \code{x}. Entries in \code{desc} may contain the string \code{"\%s"} which will be replaced by \code{ylab}. For example the border correction estimate of the \eqn{K} function has description \code{"border correction estimate of \%s"}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}. } \seealso{ See \code{\link{plot.fv}} for plotting an \code{"fv"} object. See \code{\link{as.function.fv}} to convert an \code{"fv"} object to an \R function. Use \code{\link{cbind.fv}} to combine several \code{"fv"} objects. Use \code{\link{bind.fv}} to glue additional columns onto an existing \code{"fv"} object. Simple calculations such as arithmetic and mathematical operations can be computed directly. The range of \eqn{y} values of a function \code{f} can be computed by typing \code{range(f)}. These operations are dispatched to \code{\link{Summary.fv}}, \code{\link{Math.fv}} and \code{\link{Ops.fv}}. Use \code{\link{eval.fv}} or \code{\link{with.fv}} for more complicated calculations. The functions \code{fvnames}, \code{fvnames<-} allow the user to use standard abbreviations to refer to columns of an \code{"fv"} object. \emph{Undocumented} functions for modifying an \code{"fv"} object include \code{tweak.fv.entry} and \code{rebadge.fv}. 
} \author{\adrian and \rolf.} \examples{ df <- data.frame(r=seq(0,5,by=0.1)) df <- transform(df, a=pi*r^2, b=3*r^2) X <- fv(df, "r", quote(A(r)), "a", cbind(a, b) ~ r, alim=c(0,4), labl=c("r", "\%s[true](r)", "\%s[approx](r)"), desc=c("radius of circle", "true area \%s", "rough area \%s"), fname="A") X } \keyword{spatial} \keyword{classes} spatstat.explore/man/integral.fv.Rd0000644000176200001440000000302414611073324017053 0ustar liggesusers\name{integral.fv} \alias{integral.fv} \title{ Compute Integral of Function Object } \description{ Compute the integral of a function over a specified range. } \usage{ \method{integral}{fv}(f, domain = NULL, ...) } \arguments{ \item{f}{ A function value table (object of class \code{"fv"}). } \item{domain}{ Optional. Range of values of the argument \eqn{x} over which the density \eqn{f(x)} should be integrated. A numeric vector of length 2 giving the minimum and maximum values of \eqn{x}. Infinite limits are permitted. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[spatstat.univar]{integral}}. It computes the numerical integral \deqn{ I = \int f(x) dx }{ I = integral f(x) dx } of the function object \code{f}. If \code{domain} is specified, the integral is restricted to the interval of \eqn{x} values given by the \code{domain}. The result is a numeric value or numeric vector containing one entry for each column of function values in \code{f}. Integrals are calculated numerically using the trapezoidal rule restricted to the domain given. } \value{ A single numerical value, or a numeric vector. } \author{ \adrian. } \seealso{ \code{\link[spatstat.explore]{fv.object}}, \code{\link[spatstat.univar]{integral}} \code{\link[spatstat.univar]{stieltjes}} } \examples{ g <- pcf(redwood, divisor="d") integral(g, domain=c(0, 0.1)) } \keyword{methods} \keyword{univar} \keyword{nonparametric} \keyword{math} spatstat.explore/man/blur.Rd0000644000176200001440000000732214643125461015612 0ustar liggesusers\name{blur} \alias{blur} \alias{Smooth.im} \title{Apply Gaussian Blur to a Pixel Image} \description{ Applies a Gaussian blur to a pixel image. } \usage{ blur(x, sigma = NULL, \dots, kernel="gaussian", normalise=FALSE, bleed = TRUE, varcov=NULL) \method{Smooth}{im}(X, sigma = NULL, \dots, kernel="gaussian", normalise=FALSE, bleed = TRUE, varcov=NULL) } \arguments{ \item{x,X}{The pixel image. An object of class \code{"im"}.} \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel. } \item{\dots}{ Ignored. } \item{kernel}{ String (partially matched) specifying the smoothing kernel. Current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}. } \item{normalise}{ Logical flag indicating whether the output values should be divided by the corresponding blurred image of the window itself. See Details. } \item{bleed}{ Logical flag indicating whether to allow blur to extend outside the original domain of the image. See Details. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel. Incompatible with \code{sigma}. } } \details{ This command applies a Gaussian blur to the pixel image \code{x}. \code{Smooth.im} is a method for the generic \code{\link{Smooth}} for pixel images. It is currently identical to \code{blur}, apart from the name of the first argument. The blurring kernel is the isotropic Gaussian kernel with standard deviation \code{sigma}, or the anisotropic Gaussian kernel with variance-covariance matrix \code{varcov}. 
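For example (a schematic sketch; the test image, window and bandwidth values are arbitrary): \preformatted{
  Z  <- as.im(function(x,y) { x + y }, square(1))
  B1 <- blur(Z, sigma=0.1)                   # isotropic Gaussian blur
  B2 <- blur(Z, varcov=diag(c(0.01, 0.04)))  # anisotropic Gaussian blur
}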
The arguments \code{sigma} and \code{varcov} are incompatible. Also \code{sigma} may be a vector of length 2 giving the standard deviations of two independent Gaussian coordinates, thus equivalent to \code{varcov = diag(sigma^2)}. If the pixel values of \code{x} include some \code{NA} values (meaning that the image domain does not completely fill the rectangular frame) then these \code{NA} values are first reset to zero. The algorithm then computes the convolution \eqn{x \ast G}{x * G} of the (zero-padded) pixel image \eqn{x} with the specified Gaussian kernel \eqn{G}. If \code{normalise=FALSE}, then this convolution \eqn{x\ast G}{x * G} is returned. If \code{normalise=TRUE}, then the convolution \eqn{x \ast G}{x * G} is normalised by dividing it by the convolution \eqn{w \ast G}{w * G} of the image domain \code{w} with the same Gaussian kernel. Normalisation ensures that the result can be interpreted as a weighted average of input pixel values, without edge effects due to the shape of the domain. If \code{bleed=FALSE}, then pixel values outside the original image domain are set to \code{NA}. Thus the output is a pixel image with the same domain as the input. If \code{bleed=TRUE}, then no such alteration is performed, and the result is a pixel image defined everywhere in the rectangular frame containing the input image. Computation is performed using the Fast Fourier Transform. } \value{ A pixel image with the same pixel array as the input image \code{x}. } \seealso{ \code{\link[spatstat.geom]{interp.im}} for interpolating a pixel image to a finer resolution, \code{\link{density.ppp}} for blurring a point pattern, \code{\link{Smooth.ppp}} for interpolating marks attached to points. } \examples{ Z <- as.im(function(x,y) { 4 * x^2 + 3 * y }, letterR) opa <- par(mfrow=c(1,3)) plot(Z) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=TRUE)) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=FALSE)) plot(letterR, add=TRUE) par(opa) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} \keyword{manip} spatstat.explore/man/Smooth.ssf.Rd0000644000176200001440000000160314611073325016701 0ustar liggesusers\name{Smooth.ssf} \alias{Smooth.ssf} \title{ Smooth a Spatially Sampled Function } \description{ Applies kernel smoothing to a spatially sampled function. } \usage{ \method{Smooth}{ssf}(X, \dots) } \arguments{ \item{X}{ Object of class \code{"ssf"}. } \item{\dots}{ Arguments passed to \code{\link[spatstat.explore]{Smooth.ppp}} to control the smoothing. } } \details{ An object of class \code{"ssf"} represents a real-valued or vector-valued function that has been evaluated or sampled at an irregular set of points. The function values will be smoothed using a Gaussian kernel. } \value{ A pixel image or a list of pixel images. } \author{ \adrian. } \seealso{ \code{\link{ssf}}, \code{\link[spatstat.explore]{Smooth.ppp}} } \examples{ f <- ssf(redwood, nndist(redwood)) Smooth(f, sigma=0.1) } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.explore/man/Jcross.Rd0000644000176200001440000001607214643125461016113 0ustar liggesusers\name{Jcross} \alias{Jcross} \title{ Multitype J Function (i-to-j) } \description{ For a multitype point pattern, estimate the multitype \eqn{J} function summarising the interpoint dependence between points of type \eqn{i} and of type \eqn{j}. 
} \usage{ Jcross(X, i, j, eps=NULL, r=NULL, breaks=NULL, \dots, correction=NULL) } \arguments{ \item{X}{ The observed point pattern, from which an estimate of the multitype \eqn{J} function \eqn{J_{ij}(r)}{Jij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{eps}{A positive number. The resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the function \eqn{J_{ij}(r)}{Jij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{\dots}{Ignored.} \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. Alternatively \code{correction="all"} selects all options. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{J}{the recommended estimator of \eqn{J_{ij}(r)}{Jij(r)}, currently the Kaplan-Meier estimator. } \item{r}{the values of the argument \eqn{r} at which the function \eqn{J_{ij}(r)}{Jij(r)} has been estimated } \item{km}{the Kaplan-Meier estimator of \eqn{J_{ij}(r)}{Jij(r)} } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J_{ij}(r)}{Jij(r)} } \item{han}{the Hanisch-style estimator of \eqn{J_{ij}(r)}{Jij(r)} } \item{un}{the ``uncorrected'' estimator of \eqn{J_{ij}(r)}{Jij(r)} formed by taking the ratio of uncorrected empirical estimators of \eqn{1 - G_{ij}(r)}{1 - Gij(r)} and \eqn{1 - F_{j}(r)}{1 - Fj(r)}, see \code{\link{Gdot}} and \code{\link{Fest}}. } \item{theo}{the theoretical value of \eqn{J_{ij}(r)}{Jij(r)} for a marked Poisson process, namely 1. } The result also has two attributes \code{"G"} and \code{"F"} which are respectively the outputs of \code{\link{Gcross}} and \code{\link{Fest}} for the point pattern. } \details{ This function \code{Jcross} and its companions \code{\link{Jdot}} and \code{\link{Jmulti}} are generalisations of the function \code{\link{Jest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. 
(Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). The ``type \eqn{i} to type \eqn{j}'' multitype \eqn{J} function of a stationary multitype point process \eqn{X} was introduced by Van Lieshout and Baddeley (1999). It is defined by \deqn{J_{ij}(r) = \frac{1 - G_{ij}(r)}{1 - F_{j}(r)}}{Jij(r) = (1 - Gij(r))/(1-Fj(r))} where \eqn{G_{ij}(r)}{Gij(r)} is the distribution function of the distance from a type \eqn{i} point to the nearest point of type \eqn{j}, and \eqn{F_{j}(r)}{Fj(r)} is the distribution function of the distance from a fixed point in space to the nearest point of type \eqn{j} in the pattern. An estimate of \eqn{J_{ij}(r)}{Jij(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the subprocess of type \eqn{i} points is independent of the subprocess of points of type \eqn{j}, then \eqn{J_{ij}(r) \equiv 1}{Jij(r) = 1}. Hence deviations of the empirical estimate of \eqn{J_{ij}}{Jij} from the value 1 may suggest dependence between types. This algorithm estimates \eqn{J_{ij}(r)}{Jij(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Jest}}, using the Kaplan-Meier and border corrections. The main work is done by \code{\link{Gmulti}} and \code{\link{Fest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{J_{ij}(r)}{Jij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Jdot}}, \code{\link{Jest}}, \code{\link{Jmulti}} } \examples{ # Lansing woods data: 6 types of trees woods <- lansing \testonly{ woods <- woods[seq(1,npoints(woods), by=30)] } Jhm <- Jcross(woods, "hickory", "maple") # diagnostic plot for independence between hickories and maples plot(Jhm) # synthetic example with two types "a" and "b" pp <- runifpoint(30) \%mark\% factor(sample(c("a","b"), 30, replace=TRUE)) J <- Jcross(pp) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/as.function.fv.Rd0000644000176200001440000000740014611073323017476 0ustar liggesusers\name{as.function.fv} \alias{as.function.fv} \title{ Convert Function Value Table to Function } \description{ Converts an object of class \code{"fv"} to an \R language function. } \usage{ \method{as.function}{fv}(x, ..., value=".y", extrapolate=FALSE) } \arguments{ \item{x}{ Object of class \code{"fv"} or \code{"rhohat"}. } \item{\dots}{ Ignored.
} \item{value}{ Optional. Character string or character vector selecting one or more of the columns of \code{x} for use as the function value. See Details. } \item{extrapolate}{ Logical, indicating whether to extrapolate the function outside the domain of \code{x}. See Details. } } \details{ A function value table (object of class \code{"fv"}) is a convenient way of storing and plotting several different estimates of the same function. Objects of this class are returned by many commands in \pkg{spatstat}, such as \code{\link[spatstat.explore]{Kest}}, which returns an estimate of Ripley's \eqn{K}-function for a point pattern dataset. Sometimes it is useful to convert the function value table to a \code{function} in the \R language. This is done by \code{as.function.fv}. It converts an object \code{x} of class \code{"fv"} to an \R function \code{f}. If \code{f <- as.function(x)} then \code{f} is an \R function that accepts a numeric argument and returns a corresponding value for the summary function by linear interpolation between the values in the table \code{x}. Argument values lying outside the range of the table yield an \code{NA} value (if \code{extrapolate=FALSE}) or the function value at the nearest endpoint of the range (if \code{extrapolate = TRUE}). To apply different rules to the left and right extremes, use \code{extrapolate=c(TRUE,FALSE)} and so on. Typically the table \code{x} contains several columns of function values corresponding to different edge corrections. Auxiliary information for the table identifies one of these columns as the \emph{recommended value}. By default, the values of the function \code{f <- as.function(x)} are taken from this column of recommended values. This default can be changed using the argument \code{value}, which can be a character string or character vector of names of columns of \code{x}. Alternatively \code{value} can be one of the abbreviations used by \code{\link{fvnames}}. If \code{value} specifies a single column of the table, then the result is a function \code{f(r)} with a single numeric argument \code{r} (with the same name as the original argument of the function table). If \code{value} specifies several columns of the table, then the result is a function \code{f(r,what)} where \code{r} is the numeric argument and \code{what} is a character string identifying the column of values to be used. The formal arguments of the resulting function are \code{f(r, what=value)}, which means that in a call to this function \code{f}, the permissible values of \code{what} are the entries of the original vector \code{value}; the default value of \code{what} is the first entry of \code{value}. The command \code{as.function.fv} is a method for the generic command \code{\link{as.function}}. } \value{ A \code{function(r)} or \code{function(r,what)} where \code{r} is the name of the original argument of the function table. } \author{ \adrian and \rolf } \seealso{ \code{\link[spatstat.explore]{as.function.rhohat}}, \code{\link{fv}}, \code{\link{fv.object}}, \code{\link{fvnames}}, \code{\link{plot.fv}}, \code{\link[spatstat.explore]{Kest}} } \examples{ K <- Kest(cells) f <- as.function(K) f f(0.1) g <- as.function(K, value=c("iso", "trans")) g g(0.1, "trans") } \keyword{spatial} \keyword{methods} spatstat.explore/man/hotbox.Rd0000644000176200001440000000550514611073322016143 0ustar liggesusers\name{hotbox} \alias{hotbox} \title{ Heat Kernel for a Two-Dimensional Rectangle } \description{ Calculate values of the heat kernel in a rectangle with insulated edges.
} \usage{ hotbox(Xsource, Xquery, sigma, \dots, W=NULL, squared=FALSE, nmax=20) } \arguments{ \item{Xsource}{ Point pattern of sources of heat. Object of class \code{"ppp"} or convertible to a point pattern using \code{as.ppp(Xsource, W)}. } \item{Xquery}{ Locations where the heat kernel value is required. An object of class \code{"ppp"} specifying query location points, or an object of class \code{"im"} or \code{"owin"} specifying a grid of query points. } \item{sigma}{ Bandwidth for kernel. A single number. } \item{\dots}{ Extra arguments (passed to \code{\link[spatstat.geom]{as.mask}}) controlling the pixel resolution of the result, when \code{Xquery} is a window or an image. } \item{W}{ Window (object of class \code{"owin"}) used to define the spatial domain when \code{Xsource} is not of class \code{"ppp"}. } \item{squared}{ Logical value indicating whether to take the square of each heat kernel value, before summing over the source points. } \item{nmax}{ Number of terms to be used from the infinite-sum expression for the heat kernel. A single integer. } } \details{ This function computes the sum of heat kernels associated with each of the source points, evaluating them at each query location. The window for evaluation of the heat kernel must be a rectangle. The heat kernel in any region can be expressed as an infinite sum of terms associated with the eigenfunctions of the Laplacian. The heat kernel in a rectangle is the product of heat kernels for one-dimensional intervals on the horizontal and vertical axes. This function uses \code{\link[spatstat.univar]{hotrod}} to compute the one-dimensional heat kernels, truncating the infinite sum to the first \code{nmax} terms, and then calculates the two-dimensional heat kernel from each source point to each query location. If \code{squared=TRUE} these values are squared. Finally the values are summed over all source points to obtain a single value for each query location. } \value{ If \code{Xquery} is a point pattern, the result is a numeric vector with one entry for each query point. If \code{Xquery} is an image or window, the result is a pixel image. } \seealso{ \code{\link{densityHeat.ppp}} } \references{ Baddeley, A., Davies, T., Rakshit, S., Nair, G. and McSwiggan, G. (2021) Diffusion smoothing for spatial point patterns. \emph{Statistical Science}, in press. } \author{ Adrian Baddeley and Greg McSwiggan. } \examples{ X <- runifpoint(10) Y <- runifpoint(5) hotbox(X, Y, 0.1) plot(hotbox(X, Window(X), 0.1)) points(X, pch=16) } \keyword{math} spatstat.explore/man/as.fv.Rd0000644000176200001440000000601414611073323015652 0ustar liggesusers\name{as.fv} \alias{as.fv} \alias{as.fv.fv} \alias{as.fv.fasp} \alias{as.fv.data.frame} \alias{as.fv.matrix} \alias{as.fv.bw.optim} \title{Convert Data To Class fv} \description{ Converts data into a function table (an object of class \code{"fv"}). } \usage{ as.fv(x) \method{as.fv}{fv}(x) \method{as.fv}{data.frame}(x) \method{as.fv}{matrix}(x) \method{as.fv}{fasp}(x) \method{as.fv}{bw.optim}(x) } \arguments{ \item{x}{Data which will be converted into a function table} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This command converts data \code{x}, that could be interpreted as the values of a function, into a function value table (object of the class \code{"fv"} as described in \code{\link{fv.object}}). This object can then be plotted easily using \code{\link{plot.fv}}. 
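For instance, the most common case converts a data frame or matrix of function values (a schematic sketch; the values are illustrative): \preformatted{
  r   <- seq(0, 1, length=101)
  tab <- data.frame(r=r, theo=pi*r^2, est=1.1*pi*r^2)
  f   <- as.fv(tab)   # the first column becomes the function argument
  plot(f)
}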
The dataset \code{x} may be any of the following: \itemize{ \item an object of class \code{"fv"}; \item a matrix or data frame with at least two columns; \item an object of class \code{"fasp"}, representing an array of \code{"fv"} objects. \item an object of class \code{"minconfit"}, giving the results of a minimum contrast fit by the command \code{\link[spatstat.model]{mincontrast}}; \item an object of class \code{"kppm"}, representing a fitted Cox or cluster point process model, obtained from the model-fitting command \code{\link[spatstat.model]{kppm}}; \item an object of class \code{"dppm"}, representing a fitted determinantal point process model, obtained from the model-fitting command \code{\link[spatstat.model]{dppm}}; \item an object of class \code{"bw.optim"}, representing an optimal choice of smoothing bandwidth by a cross-validation method, obtained from commands like \code{\link[spatstat.explore]{bw.diggle}}. } The function \code{as.fv} is generic, with methods for each of the classes listed above. The behaviour is as follows: \itemize{ \item If \code{x} is an object of class \code{"fv"}, it is returned unchanged. \item If \code{x} is a matrix or data frame, the first column is interpreted as the function argument, and subsequent columns are interpreted as values of the function computed by different methods. \item If \code{x} is an object of class \code{"fasp"} representing an array of \code{"fv"} objects, these are combined into a single \code{"fv"} object. \item If \code{x} is an object of class \code{"minconfit"}, or an object of class \code{"kppm"} or \code{"dppm"}, the result is a function table containing the observed summary function and the best fit summary function. \item If \code{x} is an object of class \code{"bw.optim"}, the result is a function table of the optimisation criterion as a function of the smoothing bandwidth. } } \examples{ r <- seq(0, 1, length=101) x <- data.frame(r=r, y=r^2) as.fv(x) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat.explore/man/Kest.Rd0000644000176200001440000003300414643125461015550 0ustar liggesusers\name{Kest} \alias{Kest} \title{K-function} \description{ Estimates Ripley's reduced second moment function \eqn{K(r)} from a point pattern in a window of arbitrary shape. } \usage{ Kest(X, \dots, r=NULL, rmax=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), nlarge=3000, domain=NULL, var.approx=FALSE, ratio=FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{K(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link[spatstat.geom]{as.ppp}()}. } \item{\dots}{Ignored.} \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{K(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. If necessary, specify \code{rmax}. } \item{rmax}{ Optional. Maximum desired value of the argument \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ Optional. A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"rigid"}, \code{"none"}, \code{"periodic"}, \code{"good"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{nlarge}{ Optional. Efficiency threshold.
If the number of points exceeds \code{nlarge}, then only the border correction will be computed (by default), using a fast algorithm. } \item{domain}{ Optional. Calculations will be restricted to this subset of the window. See Details. } \item{var.approx}{Logical. If \code{TRUE}, the approximate variance of \eqn{\hat K(r)}{Kest(r)} under CSR will also be computed. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K(r)} obtained by the edge corrections named. If \code{var.approx=TRUE} then the return value also has columns \code{rip} and \code{ls} containing approximations to the variance of \eqn{\hat K(r)}{Kest(r)} under CSR. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ The \eqn{K} function (variously called ``Ripley's K-function'' and the ``reduced second moment function'') of a stationary point process \eqn{X} is defined so that \eqn{\lambda K(r)}{lambda K(r)} equals the expected number of additional random points within a distance \eqn{r} of a typical random point of \eqn{X}. Here \eqn{\lambda}{lambda} is the intensity of the process, i.e. the expected number of points of \eqn{X} per unit area. The \eqn{K} function is determined by the second order moment properties of \eqn{X}. An estimate of \eqn{K} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1977, 1988). In exploratory analyses, the estimate of \eqn{K} is a useful statistic summarising aspects of inter-point ``dependence'' and ``clustering''. For inferential purposes, the estimate of \eqn{K} is usually compared to the true value of \eqn{K} for a completely random (Poisson) point process, which is \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}. Deviations between the empirical and theoretical \eqn{K} curves may suggest spatial clustering or spatial regularity. This routine \code{Kest} estimates the \eqn{K} function of a stationary point process, given observation of the process inside a known, bounded window. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link[spatstat.geom]{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link[spatstat.geom]{as.ppp}()}. The estimation of \eqn{K} is hampered by edge effects arising from the unobservability of points of the random pattern outside the window. An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). The corrections implemented here are \describe{ \item{border}{the border method or ``reduced sample'' estimator (see Ripley, 1988). This is the least efficient (statistically) and the fastest to compute. 
It can be computed for a window of arbitrary shape. } \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented for rectangular and polygonal windows (not for binary masks). } \item{translate/translation}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. } \item{rigid}{Rigid motion correction (Ohser and Stoyan, 1981). Implemented for all window geometries, but slow for complex windows. } \item{none}{ Uncorrected estimate. An estimate of the K function \emph{without} edge correction. (i.e. setting \eqn{e_{ij} = 1}{e[i,j] = 1} in the equation below. This estimate is \bold{biased} and should not be used for data analysis, \emph{unless} you have an extremely large point pattern (more than 100,000 points). } \item{periodic}{ Periodic (toroidal) edge correction. Defined only for rectangular windows. } \item{best}{ Selects the best edge correction that is available for the geometry of the window. Currently this is Ripley's isotropic correction for a rectangular or polygonal window, and the translation correction for masks. } \item{good}{ Selects the best edge correction that can be computed in a reasonable time. This is the same as \code{"best"} for datasets with fewer than 3000 points; otherwise the selected edge correction is \code{"border"}, unless there are more than 100,000 points, when it is \code{"none"}. } } The estimates of \eqn{K(r)} are of the form \deqn{ \hat K(r) = \frac a {n (n-1) } \sum_i \sum_j I(d_{ij}\le r) e_{ij} }{ Kest(r) = (a/(n * (n-1))) * sum[i,j] I(d[i,j] <= r) e[i,j]) } where \eqn{a} is the area of the window, \eqn{n} is the number of data points, and the sum is taken over all ordered pairs of points \eqn{i} and \eqn{j} in \code{X}. Here \eqn{d_{ij}}{d[i,j]} is the distance between the two points, and \eqn{I(d_{ij} \le r)}{I(d[i,j] <= r)} is the indicator that equals 1 if the distance is less than or equal to \eqn{r}. The term \eqn{e_{ij}}{e[i,j]} is the edge correction weight (which depends on the choice of edge correction listed above). Note that this estimator assumes the process is stationary (spatially homogeneous). For inhomogeneous point patterns, see \code{\link{Kinhom}}. If the point pattern \code{X} contains more than about 3000 points, the isotropic and translation edge corrections can be computationally prohibitive. The computations for the border method are much faster, and are statistically efficient when there are large numbers of points. Accordingly, if the number of points in \code{X} exceeds the threshold \code{nlarge}, then only the border correction will be computed. Setting \code{nlarge=Inf} or \code{correction="best"} will prevent this from happening. Setting \code{nlarge=0} is equivalent to selecting only the border correction with \code{correction="border"}. If \code{X} contains more than about 100,000 points, even the border correction is time-consuming. You may want to consider setting \code{correction="none"} in this case. There is an even faster algorithm for the uncorrected estimate. Approximations to the variance of \eqn{\hat K(r)}{Kest(r)} are available, for the case of the isotropic edge correction estimator, \bold{assuming complete spatial randomness} (Ripley, 1988; Lotwick and Silverman, 1982; Diggle, 2003, pp 51-53). 
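For example (a schematic sketch only; the choices shown are illustrative): \preformatted{
  K1 <- Kest(cells, correction="best")   # best available correction for this window
  K2 <- Kest(cells, var.approx=TRUE)     # also request the variance approximations
}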
If \code{var.approx=TRUE}, then the result of \code{Kest} also has a column named \code{rip} giving values of Ripley's (1988) approximation to \eqn{\mbox{var}(\hat K(r))}{var(Kest(r))}, and (if the window is a rectangle) a column named \code{ls} giving values of Lotwick and Silverman's (1982) approximation. If the argument \code{domain} is given, the calculations will be restricted to a subset of the data. In the formula for \eqn{K(r)} above, the \emph{first} point \eqn{i} will be restricted to lie inside \code{domain}. The result is an approximately unbiased estimate of \eqn{K(r)} based on pairs of points in which the first point lies inside \code{domain} and the second point is unrestricted. This is useful in bootstrap techniques. The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link[spatstat.geom]{as.owin}}. It must be a subset of the window of the point pattern \code{X}. The estimator \code{Kest} ignores marks. Its counterparts for multitype point patterns are \code{\link{Kcross}}, \code{\link{Kdot}}, and for general marked point patterns see \code{\link{Kmulti}}. Some writers, particularly Stoyan (1994, 1995) advocate the use of the ``pair correlation function'' \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}. See \code{\link{pcf}} on how to estimate this function. } \section{Envelopes, significance bands and confidence intervals}{ To compute simulation envelopes for the \eqn{K}-function under CSR, use \code{\link{envelope}}. To compute a confidence interval for the true \eqn{K}-function, use \code{\link{varblock}} or \code{\link{lohboot}}. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37--78. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ohser, J. and Stoyan, D. (1981) On the second-order and orientation analysis of planar stationary point processes. \emph{Biometrical Journal} \bold{23}, 523--533. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \section{Warnings}{ The estimator of \eqn{K(r)} is approximately unbiased for each fixed \eqn{r}, for point processes which do not have very strong interaction. (For point processes with a strong clustering interaction, the estimator is negatively biased; for point processes with a strong inhibitive interaction, the estimator is positively biased.) Bias increases with \eqn{r} and depends on the window geometry. For a rectangular window it is prudent to restrict the \eqn{r} values to a maximum of \eqn{1/4} of the smaller side length of the rectangle (Ripley, 1977, 1988; Diggle, 1983). 
Bias may become appreciable for point patterns consisting of fewer than 15 points. While \eqn{K(r)} is always a non-decreasing function, the estimator of \eqn{K} is not guaranteed to be non-decreasing. This is rarely a problem in practice, except for the border correction estimators when the number of points is small. } \seealso{ \code{\link{localK}} to extract individual summands in the \eqn{K} function. \code{\link{pcf}} for the pair correlation. \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} for alternative summary functions. \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kinhom}}, \code{\link{Kmulti}} for counterparts of the \eqn{K} function for multitype point patterns. \code{\link[spatstat.univar]{reduced.sample}} for the calculation of reduced sample estimators. } \examples{ X <- runifpoint(50) K <- Kest(X) K <- Kest(cells, correction="isotropic") plot(K) plot(K, main="K function for cells") # plot the L function plot(K, sqrt(iso/pi) ~ r) plot(K, sqrt(./pi) ~ r, ylab="L(r)", main="L function for cells") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Jcross.inhom.Rd0000644000176200001440000001061014611073322017205 0ustar liggesusers\name{Jcross.inhom} \alias{Jcross.inhom} \title{ Inhomogeneous Multitype J function (i-to-j) } \description{ For a multitype point pattern, estimate the inhomogeneous multitype \eqn{J} function summarising the interpoint dependence between points of type \eqn{i} and of type \eqn{j}. } \usage{ Jcross.inhom(X, i, j, lambda = NULL, lambdaI = NULL, lambdaJ = NULL, lambdamin = NULL, \dots, r = NULL, ReferenceMeasureMarkSetI = NULL, ratio = FALSE) } \arguments{ \item{X}{ The observed point pattern, from which an estimate of the multitype \eqn{J} function \eqn{J_{ij}(r)}{Jij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{lambda}{ Optional. Values of the estimated intensity of the point process. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process of points of type \code{i}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{i} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. Values of the the estimated intensity of the sub-process of points of type \code{j}. 
Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{j} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdamin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{\dots}{ Extra arguments passed to \code{\link[spatstat.geom]{as.mask}} to control the pixel resolution for the computation. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{J} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{ReferenceMeasureMarkSetI}{ Optional. The total measure of the mark set. A positive number. } \item{ratio}{ Logical value indicating whether to save ratio information. } } \details{ This function is the counterpart of \code{\link{Jcross}} for inhomogeneous patterns. It is computed as a special case of \code{\link{Jmulti.inhom}}. } \value{ Object of class \code{"fv"} containing the estimate of the inhomogeneous multitype \eqn{J} function. } \references{ Cronie, O. and Van Lieshout, M.N.M. (2015) Summary statistics for inhomogeneous marked point processes. \emph{Annals of the Institute of Statistical Mathematics} DOI: 10.1007/s10463-015-0515-z } \author{ Jonatan \Gonzalez and \adrian. } \seealso{ \code{\link{Jdot.inhom}}, \code{\link{Jmulti.inhom}}, \code{\link{Jcross}}. } \examples{ X <- rescale(amacrine) if(interactive() && require(spatstat.model)) { ## how to do it normally mod <- ppm(X ~ marks * x) lam <- fitted(mod, dataonly=TRUE) lmin <- min(predict(mod)[["off"]]) * 0.9 dd <- NULL } else { ## for package testing lam <- intensity(X)[as.integer(marks(X))] lmin <- intensity(X)[2] * 0.9 dd <- 32 } JC <- Jcross.inhom(X, "on", "off", lambda=lam, lambdamin=lmin, dimyx=dd) } spatstat.explore/man/varblock.Rd0000644000176200001440000001054014643125462016446 0ustar liggesusers\name{varblock} \alias{varblock} \title{ Estimate Variance of Summary Statistic by Subdivision } \description{ This command estimates the variance of any summary statistic (such as the \eqn{K}-function) by spatial subdivision of a single point pattern dataset. } \usage{ varblock(X, fun = Kest, blocks = quadrats(X, nx = nx, ny = ny), \dots, nx = 3, ny = nx, confidence=0.95) } \arguments{ \item{X}{ Point pattern dataset (object of class \code{"ppp"}). } \item{fun}{ Function that computes the summary statistic. } \item{blocks}{ Optional. A tessellation that specifies the division of the space into blocks. } \item{\dots}{ Arguments passed to \code{fun}. } \item{nx,ny}{ Optional. Number of rectangular blocks in the \eqn{x} and \eqn{y} directions. Incompatible with \code{blocks}. } \item{confidence}{ Confidence level, as a fraction between 0 and 1. } } \details{ This command computes an estimate of the variance of the summary statistic \code{fun(X)} from a single point pattern dataset \code{X} using a subdivision method. It can be used to plot \bold{confidence intervals} for the true value of a summary function such as the \eqn{K}-function. The window containing \code{X} is divided into pieces by an \code{nx * ny} array of rectangles (or is divided into pieces of more general shape, according to the argument \code{blocks} if it is present). The summary statistic \code{fun} is applied to each of the corresponding sub-patterns of \code{X} as described below. 
Then the pointwise sample mean, sample variance and sample standard deviation of these summary statistics are computed. Then pointwise confidence intervals are computed, for the specified level of confidence, defaulting to 95 percent. The variance is estimated by equation (4.21) of Diggle (2003, page 52). This assumes that the point pattern \code{X} is stationary. For further details see Diggle (2003, pp 52--53). The estimate of the summary statistic from each block is computed as follows. For most functions \code{fun}, the estimate from block \code{B} is computed by finding the subset of \code{X} consisting of points that fall inside \code{B}, and applying \code{fun} to these points, by calling \code{fun(X[B])}. However if \code{fun} is the \eqn{K}-function \code{\link{Kest}}, or any function which has an argument called \code{domain}, the estimate for each block \code{B} is computed by calling \code{fun(X, domain=B)}. In the case of the \eqn{K}-function this means that the estimate from block \code{B} is computed by counting pairs of points in which the \emph{first} point lies in \code{B}, while the second point may lie anywhere. } \section{Errors}{ If the blocks are too small, there may be insufficient data in some blocks, and the function \code{fun} may report an error. If this happens, you need to take larger blocks. An error message about incompatibility may occur. The different function estimates may be incompatible in some cases, for example, because they use different default edge corrections (typically because the tiles of the tessellation are not the same kind of geometric object as the window of \code{X}, or because the default edge correction depends on the number of points). To prevent this, specify the choice of edge correction, in the \code{correction} argument to \code{fun}, if it has one. An alternative to \code{varblock} is Loh's mark bootstrap \code{\link{lohboot}}. } \value{ A function value table (object of class \code{"fv"}) that contains the result of \code{fun(X)} as well as the sample mean, sample variance and sample standard deviation of the block estimates, together with the upper and lower two-standard-deviation confidence limits. } \references{ Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. } \author{ \adrian and \rolf } \seealso{ \code{\link[spatstat.geom]{tess}}, \code{\link[spatstat.geom]{quadrats}} for basic manipulation. \code{\link{lohboot}} for an alternative bootstrap technique. } \examples{ v <- varblock(amacrine, Kest, nx=4, ny=2) v <- varblock(amacrine, Kcross, nx=4, ny=2) if(interactive()) plot(v, iso ~ r, shade=c("hiiso", "loiso")) } \keyword{nonparametric} \keyword{spatial} spatstat.explore/man/dg.test.Rd0000644000176200001440000001255214611073324016212 0ustar liggesusers\name{dg.test} \alias{dg.test} \title{ Dao-Genton Adjusted Goodness-Of-Fit Test } \description{ Performs the Dao and Genton (2014) adjusted goodness-of-fit test of spatial pattern. } \usage{ dg.test(X, \dots, exponent = 2, nsim=19, nsimsub=nsim-1, alternative=c("two.sided", "less", "greater"), reuse = TRUE, leaveout=1, interpolate = FALSE, savefuns=FALSE, savepatterns=FALSE, verbose = TRUE) } \arguments{ \item{X}{ Either a point pattern dataset (object of class \code{"ppp"}, \code{"lpp"} or \code{"pp3"}) or a fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"} or \code{"slrm"}). 
} \item{\dots}{ Arguments passed to \code{\link{dclf.test}} or \code{\link{mad.test}} or \code{\link{envelope}} to control the conduct of the test. Useful arguments include \code{fun} to determine the summary function, \code{rinterval} to determine the range of \eqn{r} values used in the test, and \code{use.theory} described under Details. } \item{exponent}{ Exponent used in the test statistic. Use \code{exponent=2} for the Diggle-Cressie-Loosmore-Ford test, and \code{exponent=Inf} for the Maximum Absolute Deviation test. } \item{nsim}{ Number of repetitions of the basic test. } \item{nsimsub}{ Number of simulations in each basic test. There will be \code{nsim} repetitions of the basic test, each involving \code{nsimsub} simulated realisations, so there will be a total of \code{nsim * (nsimsub + 1)} simulations. } \item{alternative}{ Character string specifying the alternative hypothesis. The default (\code{alternative="two.sided"}) is that the true value of the summary function is not equal to the theoretical value postulated under the null hypothesis. If \code{alternative="less"} the alternative hypothesis is that the true value of the summary function is lower than the theoretical value. } \item{reuse}{ Logical value indicating whether to re-use the first stage simulations at the second stage, as described by Dao and Genton (2014). } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{interpolate}{ Logical value indicating whether to interpolate the distribution of the test statistic by kernel smoothing, as described in Dao and Genton (2014, Section 5). } \item{savefuns}{ Logical flag indicating whether to save the simulated function values (from the first stage). } \item{savepatterns}{ Logical flag indicating whether to save the simulated point patterns (from the first stage). } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ Performs the Dao-Genton (2014) adjusted Monte Carlo goodness-of-fit test, in the equivalent form described by Baddeley et al (2014). If \code{X} is a point pattern, the null hypothesis is CSR. If \code{X} is a fitted model, the null hypothesis is that model. The argument \code{use.theory} passed to \code{\link{envelope}} determines whether to compare the summary function for the data to its theoretical value for CSR (\code{use.theory=TRUE}) or to the sample mean of simulations from CSR (\code{use.theory=FALSE}). The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). The Dao-Genton test is biased when the significance level is very small (small \eqn{p}-values are not reliable) and we recommend \code{\link{bits.test}} in this case. } \value{ A hypothesis test (object of class \code{"htest"} which can be printed to show the outcome of the test. } \references{ Dao, N.A. and Genton, M. 
(2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. \emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84} (3) 477--489. Baddeley, A., Hardegen, A., Lawrence, L., Milne, R.K., Nair, G.M. and Rakshit, S. (2017) On two-stage Monte Carlo tests of composite hypotheses. \emph{Computational Statistics and Data Analysis} \bold{114}, 75--87. } \author{ Adrian Baddeley, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Suman Rakshit. Implemented by \spatstatAuthors. } \seealso{ \code{\link{bits.test}}, \code{\link{dclf.test}}, \code{\link{mad.test}} } \examples{ ns <- if(interactive()) 19 else 4 dg.test(cells, nsim=ns) dg.test(cells, alternative="less", nsim=ns) dg.test(cells, nsim=ns, interpolate=TRUE) } \keyword{spatial} \keyword{htest} \concept{Goodness-of-fit} spatstat.explore/man/dclf.test.Rd0000644000176200001440000002605314611073323016530 0ustar liggesusers\name{dclf.test} \alias{dclf.test} \alias{mad.test} \title{ Diggle-Cressie-Loosmore-Ford and Maximum Absolute Deviation Tests } \description{ Perform the Diggle (1986) / Cressie (1991) / Loosmore and Ford (2006) test or the Maximum Absolute Deviation test for a spatial point pattern. } \usage{ dclf.test(X, \dots, alternative=c("two.sided", "less", "greater"), rinterval = NULL, leaveout=1, scale=NULL, clamp=FALSE, interpolate=FALSE) mad.test(X, \dots, alternative=c("two.sided", "less", "greater"), rinterval = NULL, leaveout=1, scale=NULL, clamp=FALSE, interpolate=FALSE) } \arguments{ \item{X}{ Data for the test. Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class), a simulation envelope (object of class \code{"envelope"}) or a previous result of \code{dclf.test} or \code{mad.test}. } \item{\dots}{ Arguments passed to \code{\link{envelope}}. Useful arguments include \code{fun} to determine the summary function, \code{nsim} to specify the number of Monte Carlo simulations, \code{verbose=FALSE} to turn off the messages, \code{savefuns} or \code{savepatterns} to save the simulation results, and \code{use.theory} described under Details. } \item{alternative}{ The alternative hypothesis. A character string. The default is a two-sided alternative. See Details. } \item{rinterval}{ Interval of values of the summary function argument \code{r} over which the maximum absolute deviation, or the integral, will be computed for the test. A numeric vector of length 2. } \item{leaveout}{ Optional integer 0, 1 or 2 indicating how to calculate the deviation between the observed summary function and the nominal reference value, when the reference value must be estimated by simulation. See Details. } \item{scale}{ Optional. A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}. Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the test statistic is computed. } \item{clamp}{ Logical value indicating how to compute deviations in a one-sided test. Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. 
If \code{clamp=FALSE} (the default), these values are not changed. If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{interpolate}{ Logical value specifying whether to calculate the \eqn{p}-value by interpolation. If \code{interpolate=FALSE} (the default), a standard Monte Carlo test is performed, yielding a \eqn{p}-value of the form \eqn{(k+1)/(n+1)} where \eqn{n} is the number of simulations and \eqn{k} is the number of simulated values which are more extreme than the observed value. If \code{interpolate=TRUE}, the \eqn{p}-value is calculated by applying kernel density estimation to the simulated values, and computing the tail probability for this estimated distribution. } } \details{ These functions perform hypothesis tests for goodness-of-fit of a point pattern dataset to a point process model, based on Monte Carlo simulation from the model. \code{dclf.test} performs the test advocated by Loosmore and Ford (2006) which is also described in Diggle (1986), Cressie (1991, page 667, equation (8.5.42)) and Diggle (2003, page 14). See Baddeley et al (2014) for detailed discussion. \code{mad.test} performs the \sQuote{global} or \sQuote{Maximum Absolute Deviation} test described by Ripley (1977, 1981). See Baddeley et al (2014). The type of test depends on the type of argument \code{X}. \itemize{ \item If \code{X} is some kind of point pattern, then a test of Complete Spatial Randomness (CSR) will be performed. That is, the null hypothesis is that the point pattern is completely random. \item If \code{X} is a fitted point process model, then a test of goodness-of-fit for the fitted model will be performed. The model object contains the data point pattern to which it was originally fitted. The null hypothesis is that the data point pattern is a realisation of the model. \item If \code{X} is an envelope object generated by \code{\link{envelope}}, then it should have been generated with \code{savefuns=TRUE} or \code{savepatterns=TRUE} so that it contains simulation results. These simulations will be treated as realisations from the null hypothesis. \item Alternatively \code{X} could be a previously-performed test of the same kind (i.e. the result of calling \code{dclf.test} or \code{mad.test}). The simulations used to perform the original test will be re-used to perform the new test (provided these simulations were saved in the original test, by setting \code{savefuns=TRUE} or \code{savepatterns=TRUE}). } The argument \code{alternative} specifies the alternative hypothesis, that is, the direction of deviation that will be considered statistically significant. If \code{alternative="two.sided"} (the default), both positive and negative deviations (between the observed summary function and the theoretical function) are significant. If \code{alternative="less"}, then only negative deviations (where the observed summary function is lower than the theoretical function) are considered. If \code{alternative="greater"}, then only positive deviations (where the observed summary function is higher than the theoretical function) are considered. In all cases, the algorithm will first call \code{\link{envelope}} to generate or extract the simulated summary functions. The number of simulations that will be generated or extracted, is determined by the argument \code{nsim}, and defaults to 99. 
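For example (a schematic sketch; the number of simulations is illustrative): \preformatted{
  E  <- envelope(cells, Lest, nsim=19, savefuns=TRUE)
  T1 <- dclf.test(E)   # test computed from the saved simulated functions
  T2 <- mad.test(E)    # same saved simulations, different test statistic
}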
The summary function that will be computed is determined by the argument \code{fun} (or the first unnamed argument in the list \code{\dots}) and defaults to \code{\link{Kest}} (except when \code{X} is an envelope object generated with \code{savefuns=TRUE}, when these functions will be taken). The choice of summary function \code{fun} affects the power of the test. It is normally recommended to apply a variance-stabilising transformation (Ripley, 1981). If you are using the \eqn{K} function, the normal practice is to replace this by the \eqn{L} function (Besag, 1977) computed by \code{\link{Lest}}. If you are using the \eqn{F} or \eqn{G} functions, the recommended practice is to apply Fisher's variance-stabilising transformation \eqn{\sin^{-1}\sqrt x}{asin(sqrt(x))} using the argument \code{transform}. See the Examples. The argument \code{rinterval} specifies the interval of distance values \eqn{r} which will contribute to the test statistic (either maximising over this range of values for \code{mad.test}, or integrating over this range of values for \code{dclf.test}). This affects the power of the test. General advice and experiments in Baddeley et al (2014) suggest that the maximum \eqn{r} value should be slightly larger than the maximum possible range of interaction between points. The \code{dclf.test} is quite sensitive to this choice, while the \code{mad.test} is relatively insensitive. It is also possible to specify a pointwise test (i.e. taking a single, fixed value of distance \eqn{r}) by specifing \code{rinterval = c(r,r)}. The argument \code{use.theory} passed to \code{\link{envelope}} determines whether to compare the summary function for the data to its theoretical value for CSR (\code{use.theory=TRUE}) or to the sample mean of simulations from CSR (\code{use.theory=FALSE}). The test statistic \eqn{T} is defined in equations (10.21) and (10.22) respectively on page 394 of Baddeley, Rubak and Turner (2015). The argument \code{leaveout} specifies how to calculate the discrepancy between the summary function for the data and the nominal reference value, when the reference value must be estimated by simulation. The values \code{leaveout=0} and \code{leaveout=1} are both algebraically equivalent (Baddeley et al, 2014, Appendix) to computing the difference \code{observed - reference} where the \code{reference} is the mean of simulated values. The value \code{leaveout=2} gives the leave-two-out discrepancy proposed by Dao and Genton (2014). } \section{Handling Ties}{ If the observed value of the test statistic is equal to one or more of the simulated values (called a \emph{tied value}), then the tied values will be assigned a random ordering, and a message will be printed. } \value{ An object of class \code{"htest"}. Printing this object gives a report on the result of the test. The \eqn{p}-value is contained in the component \code{p.value}. } \references{ Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs} \bold{84}(3) 477--489. \baddrubaturnbook Besag, J. (1977) Discussion of Dr Ripley's paper. \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 193--195. Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Dao, N.A. and Genton, M. (2014) A Monte Carlo adjusted goodness-of-fit test for parametric models describing spatial point patterns. 
\emph{Journal of Graphical and Computational Statistics} \bold{23}, 497--517. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neuroscience Methods} \bold{18}, 115--125. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Loosmore, N.B. and Ford, E.D. (2006) Statistical inference using the \emph{G} or \emph{K} point pattern spatial statistics. \emph{Ecology} \bold{87}, 1925--1931. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. } \author{ \adrian, Andrew Hardegen and Suman Rakshit. } \seealso{ \code{\link{envelope}}, \code{\link{dclf.progress}} } \examples{ dclf.test(cells, Lest, nsim=39) m <- mad.test(cells, Lest, verbose=FALSE, rinterval=c(0, 0.1), nsim=19) m # extract the p-value m$p.value # variance stabilised G function dclf.test(cells, Gest, transform=expression(asin(sqrt(.))), verbose=FALSE, nsim=19) ## one-sided test ml <- mad.test(cells, Lest, verbose=FALSE, nsim=19, alternative="less") ## scaled mad.test(cells, Kest, verbose=FALSE, nsim=19, rinterval=c(0.05, 0.2), scale=function(r) { r }) } \keyword{spatial} \keyword{htest} \concept{Goodness-of-fit} spatstat.explore/man/Linhom.Rd0000644000176200001440000000573014643125461016075 0ustar liggesusers\name{Linhom} \alias{Linhom} \title{Inhomogeneous L-function} \description{ Calculates an estimate of the inhomogeneous version of the \eqn{L}-function (Besag's transformation of Ripley's \eqn{K}-function) for a spatial point pattern. } \usage{ Linhom(X, ..., correction) } \arguments{ \item{X}{ The observed point pattern, from which an estimate of \eqn{L(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link[spatstat.geom]{as.ppp}()}. } \item{correction,\dots}{ Other arguments passed to \code{\link{Kinhom}} to control the estimation procedure. } } \details{ This command computes an estimate of the inhomogeneous version of the \eqn{L}-function for a spatial point pattern. The original \eqn{L}-function is a transformation (proposed by Besag) of Ripley's \eqn{K}-function, \deqn{L(r) = \sqrt{\frac{K(r)}{\pi}}}{L(r) = sqrt(K(r)/pi)} where \eqn{K(r)} is the Ripley \eqn{K}-function of a spatially homogeneous point pattern, estimated by \code{\link{Kest}}. The inhomogeneous \eqn{L}-function is the corresponding transformation of the inhomogeneous \eqn{K}-function, estimated by \code{\link{Kinhom}}. It is appropriate when the point pattern clearly does not have a homogeneous intensity of points. It was proposed by Baddeley, \Moller and Waagepetersen (2000). The command \code{Linhom} first calls \code{\link{Kinhom}} to compute the estimate of the inhomogeneous K-function, and then applies the square root transformation. For a Poisson point pattern (homogeneous or inhomogeneous), the theoretical value of the inhomogeneous \eqn{L}-function is \eqn{L(r) = r}. The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L} is more appropriate for use in simulation envelopes and hypothesis tests. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. 
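As a quick numerical check (a hedged sketch, not part of the original file; the bandwidth \code{sigma=0.1} is an arbitrary choice), the estimate returned by \code{Linhom} is simply the square-root transform of the corresponding \code{Kinhom} estimate:

X <- japanesepines
K <- Kinhom(X, sigma=0.1)
L <- Linhom(X, sigma=0.1)
max(abs(L$iso - sqrt(K$iso / pi)))   # essentially zero, up to rounding error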
Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L} has been estimated } \item{theo}{the theoretical value \eqn{L(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L(r)} obtained by the edge corrections named. } \references{ Baddeley, A., \Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. } \seealso{ \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{Kinhom}}, \code{\link{pcf}} } \examples{ X <- japanesepines L <- Linhom(X, sigma=0.1) plot(L, main="Inhomogeneous L function for Japanese Pines") } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Kdot.Rd0000644000176200001440000001701514643125461015547 0ustar liggesusers\name{Kdot} \alias{Kdot} \title{ Multitype K Function (i-to-any) } \description{ For a multitype point pattern, estimate the multitype \eqn{K} function which counts the expected number of other points of the process within a given distance of a point of type \eqn{i}. } \usage{ Kdot(X, i, r=NULL, breaks=NULL, correction, ..., ratio=FALSE, from) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{K} function \eqn{K_{i\bullet}(r)}{Ki.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ This argument is for internal use only. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"periodic"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{Ignored.} \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{from}{An alternative way to specify \code{i}.} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{i\bullet}(r)}{Ki.(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{i\bullet}(r)}{Ki.(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{i\bullet}(r)}{Ki.(r)} obtained by the edge corrections named. 
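For illustration, a brief hedged sketch (using the built-in \code{amacrine} data; the corrections chosen are arbitrary) of requesting particular edge corrections and plotting the corresponding columns:

Kd <- Kdot(amacrine, "on", correction=c("isotropic", "translate"))
plot(Kd, cbind(iso, trans, theo) ~ r)   # plot the named correction columns against r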
If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ This function \code{Kdot} and its companions \code{\link{Kcross}} and \code{\link{Kmulti}} are generalisations of the function \code{\link{Kest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. If \code{i} is missing, it defaults to the first level of the marks factor, \code{i = levels(X$marks)[1]}. The ``type \eqn{i} to any type'' multitype \eqn{K} function of a stationary multitype point process \eqn{X} is defined so that \eqn{\lambda K_{i\bullet}(r)}{lambda Ki.(r)} equals the expected number of additional random points within a distance \eqn{r} of a typical point of type \eqn{i} in the process \eqn{X}. Here \eqn{\lambda}{lambda} is the intensity of the process, i.e. the expected number of points of \eqn{X} per unit area. The function \eqn{K_{i\bullet}}{Ki.} is determined by the second order moment properties of \eqn{X}. An estimate of \eqn{K_{i\bullet}(r)}{Ki.(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the subprocess of type \eqn{i} points were independent of the subprocess of points of all types not equal to \eqn{i}, then \eqn{K_{i\bullet}(r)}{Ki.(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{i\bullet}}{Ki.} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} may suggest dependence between types. This algorithm estimates the distribution function \eqn{K_{i\bullet}(r)}{Ki.(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}, using the chosen edge correction(s). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. The pair correlation function can also be applied to the result of \code{Kdot}; see \code{\link{pcf}}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. 
B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. The reduced sample estimator of \eqn{K_{i\bullet}}{Ki.} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. } \seealso{ \code{\link{Kdot}}, \code{\link{Kest}}, \code{\link{Kmulti}}, \code{\link{pcf}} } \examples{ # Lansing woods data: 6 types of trees woods <- lansing \testonly{woods <- woods[seq(1, npoints(woods), by=80)]} Kh. <- Kdot(woods, "hickory") # diagnostic plot for independence between hickories and other trees plot(Kh.) # synthetic example with two marks "a" and "b" \donttest{ pp <- runifpoispp(50) pp <- pp \%mark\% factor(sample(c("a","b"), npoints(pp), replace=TRUE)) K <- Kdot(pp, "a") } } \author{ \adrian and \rolf. } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/envelope.envelope.Rd0000644000176200001440000000671214611073324020274 0ustar liggesusers\name{envelope.envelope} \alias{envelope.envelope} \title{ Recompute Envelopes } \description{ Given a simulation envelope (object of class \code{"envelope"}), compute another envelope from the same simulation data using different parameters. } \usage{ \method{envelope}{envelope}(Y, fun = NULL, ..., transform=NULL, global=FALSE, VARIANCE=FALSE) } \arguments{ \item{Y}{ A simulation envelope (object of class \code{"envelope"}). } \item{fun}{ Optional. Summary function to be applied to the simulated point patterns. } \item{\dots,transform,global,VARIANCE}{ Parameters controlling the type of envelope that is re-computed. See \code{\link{envelope}}. } } \details{ This function can be used to re-compute a simulation envelope from previously simulated data, using different parameter settings for the envelope: for example, a different significance level, or a global envelope instead of a pointwise envelope. The function \code{\link{envelope}} is generic. This is the method for the class \code{"envelope"}. The argument \code{Y} should be a simulation envelope (object of class \code{"envelope"}) produced by any of the methods for \code{\link{envelope}}. Additionally, \code{Y} must contain either \itemize{ \item the simulated point patterns that were used to create the original envelope (so \code{Y} should have been created by calling \code{\link{envelope}} with \code{savepatterns=TRUE}); \item the summary functions of the simulated point patterns that were used to create the original envelope (so \code{Y} should have been created by calling \code{\link{envelope}} with \code{savefuns=TRUE}). } If the argument \code{fun} is given, it should be a summary function that can be applied to the simulated point patterns that were used to create \code{Y}. The envelope of the summary function \code{fun} for these point patterns will be computed using the parameters specified in \code{\dots}. If \code{fun} is not given, then: \itemize{ \item If \code{Y} contains the summary functions that were used to compute the original envelope, then the new envelope will be computed from these original summary functions. 
\item Otherwise, if \code{Y} contains the simulated point patterns, then the \eqn{K} function \code{\link{Kest}} will be applied to each of these simulated point patterns, and the new envelope will be based on the \eqn{K} functions. } The new envelope will be computed using the parameters specified in \code{\dots}. See \code{\link{envelope}} for a full list of envelope parameters. Frequently-used parameters include \code{nrank} and \code{nsim} (to change the number of simulations used and the significance level of the envelope), \code{global} (to change from pointwise to global envelopes) and \code{VARIANCE} (to compute the envelopes from the sample moments instead of the ranks). } \value{ An envelope (object of class \code{"envelope"}). } \seealso{ \code{\link{envelope}} } \examples{ E <- envelope(cells, Kest, nsim=19, savefuns=TRUE, savepatterns=TRUE) E2 <- envelope(E, nrank=2) Eg <- envelope(E, global=TRUE) EG <- envelope(E, Gest) EL <- envelope(E, transform=expression(sqrt(./pi))) } \author{\adrian and \rolf } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} \concept{Goodness-of-fit} \concept{Test of randomness} \concept{Envelope of simulations} spatstat.explore/man/nncorr.Rd0000644000176200001440000002002214643125461016141 0ustar liggesusers\name{nncorr} \alias{nncorr} \alias{nnmean} \alias{nnvario} \title{Nearest-Neighbour Correlation Indices of Marked Point Pattern} \description{ Computes nearest-neighbour correlation indices of a marked point pattern, including the nearest-neighbour mark product index (default case of \code{nncorr}), the nearest-neighbour mark index (\code{nnmean}), and the nearest-neighbour variogram index (\code{nnvario}). } \usage{ nncorr(X, f = function(m1, m2) { m1 * m2 }, k = 1, \dots, use = "all.obs", method = c("pearson", "kendall", "spearman"), denominator=NULL, na.action="warn") nnmean(X, k=1, na.action="warn") nnvario(X, k=1, na.action="warn") } \arguments{ \item{X}{ The observed point pattern. An object of class \code{"ppp"}. } \item{f}{ Function \eqn{f} used in the definition of the nearest neighbour correlation. There is a sensible default that depends on the type of marks of \code{X}. } \item{k}{ Integer. The \code{k}-th nearest neighbour of each point will be used. } \item{\dots}{ Extra arguments passed to \code{f}. } \item{use,method}{ Arguments passed to the standard correlation function \code{\link{cor}}. } \item{denominator}{ Internal use only. } \item{na.action}{ Character string (passed to \code{\link[spatstat.geom]{is.marked.ppp}}) specifying what to do if the marks contain \code{NA} values. } } \details{ The nearest neighbour correlation index \eqn{\bar n_f}{nbar} of a marked point process \eqn{X} is a number measuring the dependence between the mark of a typical point and the mark of its nearest neighbour. The command \code{nncorr} computes the nearest neighbour correlation index based on any test function \code{f} provided by the user. The default behaviour of \code{nncorr} is to compute the nearest neighbour mark product index. The commands \code{nnmean} and \code{nnvario} are convenient abbreviations for other special choices of \code{f}. In the default case, \code{nncorr(X)} computes three different versions of the nearest-neighbour correlation index: the unnormalised, normalised, and classical correlations.
\describe{ \item{unnormalised:}{ The \bold{unnormalised} nearest neighbour correlation (Stoyan and Stoyan, 1994, section 14.7) is defined as \deqn{\bar n_f = E[f(M, M^\ast)]}{nbar[f] = E[f(M, M*)]} where \eqn{E[]} denotes mean value, \eqn{M} is the mark attached to a typical point of the point process, and \eqn{M^\ast}{M*} is the mark attached to its nearest neighbour (i.e. the nearest other point of the point process). Here \eqn{f} is any function \eqn{f(m_1,m_2)}{f(m1,m2)} with two arguments which are possible marks of the pattern, and which returns a nonnegative real value. Common choices of \eqn{f} are: for continuous real-valued marks, \deqn{f(m_1,m_2) = m_1 m_2}{f(m1,m2)= m1 * m2} for discrete marks (multitype point patterns), \deqn{f(m_1,m_2) = 1(m_1 = m_2)}{f(m1,m2)= (m1 == m2)} and for marks taking values in \eqn{[0,2\pi)}{[0,2 * pi)}, \deqn{f(m_1,m_2) = \sin(m_1 - m_2)}{f(m1,m2) = sin(m1-m2).} For example, in the second case, the unnormalised nearest neighbour correlation \eqn{\bar n_f}{nbar[f]} equals the proportion of points in the pattern which have the same mark as their nearest neighbour. Note that \eqn{\bar n_f}{nbar[f]} is not a ``correlation'' in the usual statistical sense. It can take values greater than 1. } \item{normalised:}{ We can define a \bold{normalised} nearest neighbour correlation by \deqn{\bar m_f = \frac{E[f(M,M^\ast)]}{E[f(M,M')]}}{mbar[f] = E[f(M,M*)]/E[f(M,M')]} where again \eqn{M} is the mark attached to a typical point, \eqn{M^\ast}{M*} is the mark attached to its nearest neighbour, and \eqn{M'} is an independent copy of \eqn{M} with the same distribution. This normalisation is also not a ``correlation'' in the usual statistical sense, but is normalised so that the value 1 suggests ``lack of correlation'': if the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{\bar m_f = 1}{mbar[f] = 1}. The interpretation of values larger or smaller than 1 depends on the choice of function \eqn{f}. } \item{classical:}{ Finally if the marks of \code{X} are real numbers, we can also compute the \bold{classical} correlation, that is, the correlation coefficient of the two random variables \eqn{M} and \eqn{M^\ast}{M*}. The classical correlation has a value between \eqn{-1} and \eqn{1}. Values close to \eqn{-1} or \eqn{1} indicate strong dependence between the marks. } } In the default case where \code{f} is not given, \code{nncorr(X)} computes \itemize{ \item If the marks of \code{X} are real numbers, the unnormalised and normalised versions of the nearest-neighbour product index \eqn{E[M \, M^\ast]}{E[M * M*]}, and the classical correlation between \eqn{M} and \eqn{M^\ast}{M*}. \item If the marks of \code{X} are factor valued, the unnormalised and normalised versions of the nearest-neighbour equality index \eqn{P[M = M^\ast]}{P[M = M*]}. } The wrapper functions \code{nnmean} and \code{nnvario} compute the correlation indices for two special choices of the function \eqn{f(m_1,m_2)}{f(m1,m2)}. They are defined only when the marks are numeric. \itemize{ \item \code{nnmean} computes the correlation indices for \eqn{f(m_1,m_2) = m_1}{f(m1,m2) = m1}. The unnormalised index is simply the mean value of the mark of the neighbour of a typical point, \eqn{E[M^\ast]}{E[M*]}, while the normalised index is \eqn{E[M^\ast]/E[M]}{E[M*]/E[M]}, the ratio of the mean mark of the neighbour of a typical point to the mean mark of a typical point. 
\item \code{nnvario} computes the correlation indices for \eqn{f(m_1,m_2) = (1/2) (m_1-m_2)^2}{f(m1,m2) = (1/2) * (m1-m2)^2}. } The argument \code{X} must be a point pattern (object of class \code{"ppp"}) and must be a marked point pattern. (The marks may be a data frame, containing several columns of mark variables; each column is treated separately.) If the argument \code{f} is given, it must be a function, accepting two arguments \code{m1} and \code{m2} which are vectors of equal length containing mark values (of the same type as the marks of \code{X}). It must return a vector of numeric values of the same length as \code{m1} and \code{m2}. The values must be non-negative. The arguments \code{use} and \code{method} control the calculation of the classical correlation using \code{\link{cor}}, as explained in the help file for \code{\link{cor}}. Other arguments may be passed to \code{f} through the \code{...} argument. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated using the \sQuote{border method} edge correction. } \value{ Labelled vector of length 2 or 3 containing the unnormalised and normalised nearest neighbour correlations, and the classical correlation if appropriate. Alternatively a matrix with 2 or 3 rows, containing this information for each mark variable. } \examples{ nnmean(finpines) nnvario(finpines) nncorr(finpines) # heights of neighbouring trees are slightly negatively correlated nncorr(amacrine) # neighbouring cells are usually of different type } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/rose.Rd0000644000176200001440000001261214611073325015610 0ustar liggesusers\name{rose} \alias{rose} \alias{rose.default} \alias{rose.histogram} \alias{rose.density} \alias{rose.fv} \title{Rose Diagram} \description{ Plots a rose diagram (rose of directions), the analogue of a histogram or density plot for angular data. } \usage{ rose(x, \dots) \method{rose}{default}(x, breaks = NULL, \dots, weights=NULL, nclass = NULL, unit = c("degree", "radian"), start=0, clockwise=FALSE, main) \method{rose}{histogram}(x, \dots, unit = c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot = TRUE) \method{rose}{density}(x, \dots, unit = c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot = TRUE) \method{rose}{fv}(x, \dots, unit = c("degree", "radian"), start=0, clockwise=FALSE, main, labels=TRUE, at=NULL, do.plot = TRUE) } \arguments{ \item{x}{ Data to be plotted. A numeric vector containing angles, or a \code{histogram} object containing a histogram of angular values, or a \code{density} object containing a smooth density estimate for angular data, or an \code{fv} object giving a function of an angular argument. } \item{breaks, nclass}{ Arguments passed to \code{\link[graphics]{hist}} to determine the histogram breakpoints. } \item{\dots}{ Additional arguments passed to \code{\link[graphics]{polygon}} controlling the appearance of the plot (or passed from \code{rose.default} to \code{\link[graphics]{hist}} to control the calculation of the histogram). 
} \item{unit}{ The unit in which the angles are expressed. } \item{start}{ The starting direction for measurement of angles, that is, the spatial direction which corresponds to a measured angle of zero. Either a character string giving a compass direction (\code{"N"} for north, \code{"S"} for south, \code{"E"} for east, or \code{"W"} for west) or a number giving the angle from the the horizontal (East) axis to the starting direction. For example, if \code{unit="degree"} and \code{clockwise=FALSE}, then \code{start=90} and \code{start="N"} are equivalent. The default is to measure angles anti-clockwise from the horizontal axis (East direction). } \item{clockwise}{ Logical value indicating whether angles increase in the clockwise direction (\code{clockwise=TRUE}) or anti-clockwise, counter-clockwise direction (\code{clockwise=FALSE}, the default). } \item{weights}{ Optional vector of numeric weights associated with \code{x}. } \item{main}{ Optional main title for the plot. } \item{labels}{ Either a logical value indicating whether to plot labels next to the tick marks, or a vector of labels for the tick marks. } \item{at}{ Optional vector of angles at which tick marks should be plotted. Set \code{at=numeric(0)} to suppress tick marks. } \item{do.plot}{ Logical value indicating whether to really perform the plot. } } \details{ A rose diagram or rose of directions is the analogue of a histogram or bar chart for data which represent angles in two dimensions. The bars of the bar chart are replaced by circular sectors in the rose diagram. The function \code{rose} is generic, with a default method for numeric data, and methods for histograms and function tables. If \code{x} is a numeric vector, it must contain angular values in the range 0 to 360 (if \code{unit="degree"}) or in the range 0 to \code{2 * pi} (if \code{unit="radian"}). A histogram of the data will first be computed using \code{\link[graphics]{hist}}. Then the rose diagram of this histogram will be plotted by \code{rose.histogram}. If \code{x} is an object of class \code{"histogram"} produced by the function \code{\link[graphics]{hist}}, representing the histogram of angular data, then the rose diagram of the densities (rather than the counts) in this histogram object will be plotted. If \code{x} is an object of class \code{"density"} produced by \code{\link{circdensity}} or \code{\link[stats]{density.default}}, representing a kernel smoothed density estimate of angular data, then the rose diagram of the density estimate will be plotted. If \code{x} is a function value table (object of class \code{"fv"}) then the argument of the function will be interpreted as an angle, and the value of the function will be interpreted as the radius. By default, angles are interpreted using the mathematical convention where the zero angle is the horizontal \eqn{x} axis, and angles increase anti-clockwise. Other conventions can be specified using the arguments \code{start} and \code{clockwise}. Standard compass directions are obtained by setting \code{unit="degree"}, \code{start="N"} and \code{clockwise=TRUE}. } \value{A window (class \code{"owin"}) containing the plotted region.} \author{\adrian \rolf and \ege } \seealso{ \code{\link{fv}}, \code{\link[graphics]{hist}}, \code{\link{circdensity}}, \code{\link[stats]{density.default}}. 
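As an additional hedged illustration (not in the original file; the data and colours are arbitrary choices), a rose diagram can also be drawn from a kernel-smoothed circular density computed with \code{circdensity}:

ang <- runif(500, max=360)
d <- circdensity(ang)                            # kernel density estimate on the circle
rose(d, col="pink", start="N", clockwise=TRUE)   # compass convention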
} \examples{ ang <- runif(1000, max=360) rose(ang, col="grey") rose(ang, col="grey", start="N", clockwise=TRUE) } \keyword{spatial} \keyword{hplot} spatstat.explore/man/plot.laslett.Rd0000644000176200001440000000341314643125462017271 0ustar liggesusers\name{plot.laslett} \alias{plot.laslett} \title{ Plot Laslett Transform } \description{ Plot the result of Laslett's Transform. } \usage{ \method{plot}{laslett}(x, \dots, Xpars = list(box = TRUE, col = "grey"), pointpars = list(pch = 3, cols = "blue"), rectpars = list(lty = 3, border = "green")) } \arguments{ \item{x}{ Object of class \code{"laslett"} produced by \code{\link{laslett}} representing the result of Laslett's transform. } \item{\dots}{ Additional plot arguments passed to \code{\link[spatstat.geom]{plot.solist}}. } \item{Xpars}{ A list of plot arguments passed to \code{\link[spatstat.geom]{plot.owin}} or \code{\link[spatstat.geom]{plot.im}} to display the original region \code{X} before transformation. } \item{pointpars}{ A list of plot arguments passed to \code{\link[spatstat.geom]{plot.ppp}} to display the tangent points. } \item{rectpars}{ A list of plot arguments passed to \code{\link[spatstat.geom]{plot.owin}} to display the maximal rectangle. } } \details{ This is the \code{plot} method for the class \code{"laslett"}. The function \code{\link{laslett}} applies Laslett's Transform to a spatial region \code{X} and returns an object of class \code{"laslett"} representing the result of the transformation. The result is plotted by this method. The plot function \code{\link[spatstat.geom]{plot.solist}} is used to align the before-and-after pictures. See \code{\link[spatstat.geom]{plot.solist}} for further options to control the plot. } \value{ None. } \author{ Kassel Hingee and \adrian. } \seealso{ \code{\link{laslett}} } \examples{ b <- laslett(heather$coarse, plotit=FALSE) plot(b, main="Heather Data") } \keyword{spatial} \keyword{hplot} spatstat.explore/man/Gdot.inhom.Rd0000644000176200001440000001347214611073322016650 0ustar liggesusers\name{Gdot.inhom} \alias{Gdot.inhom} \title{ Inhomogeneous Multitype G Dot Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the dot \eqn{G} function, which is the distribution of the distance from a point of type \eqn{i} to the nearest other point of any type, adjusted for spatially varying intensity. } \usage{ Gdot.inhom(X, i, lambdaI = NULL, lambdadot = NULL, lambdamin = NULL, \dots, r = NULL, ReferenceMeasureMarkSetI = NULL, ratio = FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous dot type \eqn{G} function \eqn{G_{i\bullet}(r)}{Gi.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process of points of type \code{i}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{i} points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdadot}{ Optional. 
Values of the estimated intensity of the entire point process, Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdamin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{\dots}{ Ignored. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous dot type \eqn{G} function \eqn{G_{i\bullet}(r)}{Gi.(r)} should be evaluated. Not normally given by the user; there is a sensible default. } \item{ReferenceMeasureMarkSetI}{ Optional. The total measure of the mark set. A positive number. } \item{ratio}{ Logical value indicating whether to save ratio information. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}) containing estimates of the inhomogeneous dot type \eqn{G} function. } \details{ This is a generalisation of the function \code{\link{Gdot}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Ginhom}}. The argument \code{lambdaI} supplies the values of the intensity of the sub-process of points of type \code{i}. It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the type \code{i} intensity at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the type \code{i} intensity evaluated only at the data points of type \code{i}. The length of this vector must equal the number of type \code{i} points in \code{X}. } \item{a function}{ of the form \code{function(x,y)} which can be evaluated to give values of the intensity at any locations. } \item{a fitted point process model}{ (object of class \code{"ppm"}, \code{"kppm"} or \code{"dppm"}) whose fitted \emph{trend} can be used as the fitted intensity. (If \code{update=TRUE} the model will first be refitted to the data \code{X} before the trend is computed.) } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. Similarly the argument \code{lambdadot} should contain estimated values of the intensity of the entire point process. It may be either a pixel image, a numeric vector of length equal to the number of points in \code{X}, a function, or omitted. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{i\bullet}(r)}{Gi.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Cronie, O. and Van Lieshout, M.N.M. (2015) Summary statistics for inhomogeneous marked point processes. \emph{Annals of the Institute of Statistical Mathematics} DOI: 10.1007/s10463-015-0515-z } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Gdot}}, \code{\link{Ginhom}}, \code{\link{Gcross.inhom}}, \code{\link{Gmulti.inhom}}. 
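The following is a hedged sketch (the bandwidth selector \code{bw.ppl} and the factor \code{0.9} are arbitrary illustrative choices, not recommendations) of supplying leave-one-out kernel estimates of the intensities, so that no fitted model is required:

Xr <- rescale(amacrine)
lamdot <- density(Xr, sigma=bw.ppl, at="points", leaveoneout=TRUE)
lamI <- lamdot[marks(Xr) == "on"]
lmin <- 0.9 * min(lamdot)
GD2 <- Gdot.inhom(Xr, "on", lambdaI=lamI, lambdadot=lamdot, lambdamin=lmin)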
} \examples{ X <- rescale(amacrine) if(interactive() && require(spatstat.model)) { ## how to do it normally mod <- ppm(X ~ marks * x) lam <- fitted(mod, dataonly=TRUE) lmin <- min(predict(mod)[["off"]]) * 0.9 } else { ## for package testing lam <- intensity(X)[as.integer(marks(X))] lmin <- intensity(X)[2] * 0.9 } lamI <- lam[marks(X) == "on"] GD <- Gdot.inhom(X, "on", lambdaI=lamI, lambdadot=lam, lambdamin=lmin) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/pool.Rd0000644000176200001440000000171714611073324015614 0ustar liggesusers\name{pool} \alias{pool} \title{ Pool Data } \description{ Pool the data from several objects of the same class. } \usage{ pool(...) } \arguments{ \item{\dots}{ Objects of the same type. } } \details{ The function \code{pool} is generic. There are methods for several classes, listed below. \code{pool} is used to combine the data from several objects of the same type, and to compute statistics based on the combined dataset. It may be used to pool the estimates obtained from replicated datasets. It may also be used in high-performance computing applications, when the objects \code{\dots} have been computed on different processors or in different batch runs, and we wish to combine them. } \value{ An object of the same class as the arguments \code{\dots}. } \seealso{ \code{\link{pool.envelope}}, \code{\link{pool.fasp}}, \code{\link{pool.rat}}, \code{\link{pool.fv}} } \author{\adrian and \rolf } \keyword{spatial} spatstat.explore/man/blurHeat.Rd0000644000176200001440000000424014700374645016414 0ustar liggesusers\name{blurHeat} \alias{blurHeat} \alias{blurHeat.im} \alias{SmoothHeat.im} \title{ Diffusion Blur } \description{ Blur a Pixel Image by Applying Diffusion } \usage{ blurHeat(X, \dots) \method{blurHeat}{im}(X, sigma, \dots, connect = 8, symmetric = FALSE, k= 1, show = FALSE) \method{SmoothHeat}{im}(X, sigma, \dots) } \arguments{ \item{X}{ Pixel image (object of class \code{"im"}). } \item{sigma}{ Smoothing bandwidth. A numeric value, a pixel image or a \code{function(x,y)}. } \item{\dots}{ Ignored by \code{blurHeat.im}. } \item{connect}{ Grid connectivity: either 4 or 8. } \item{symmetric}{ Logical value indicating whether to \emph{force} the algorithm to use a symmetric random walk. } \item{k}{ Integer. Calculations will be performed by repeatedly multiplying the current state by the \code{k}-step transition matrix. } \item{show}{ Logical value indicating whether to plot successive iterations. } } \details{ The function \code{blurHeat} is generic. This help file documents the method \code{blurHeat.im} for pixel images (objects of class \code{"im"}). This is currently equivalent to \code{SmoothHeat.im}, which is also documented here. If \code{sigma} is a numeric value, then the classical time-dependent heat equation is solved up to time \code{t = sigma^2} starting with the initial condition given by the image \code{X}. This has the effect of blurring the input image \code{X}. If \code{sigma} is a function or a pixel image, then it is treated as a spatially-variable diffusion rate, and the corresponding heat equation is solved. This command can be used to calculate the expected value of the diffusion estimator of intensity (\code{\link[spatstat.explore]{densityHeat}}) when the true intensity is known. } \value{ A pixel image on the same raster as \code{X}. } \author{ \adrian. } \seealso{ \code{\link[spatstat.explore]{densityHeat}}, \code{\link[spatstat.explore]{blur}}. 
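A hedged sketch of the spatially-variable case (the diffusion-rate image \code{sig} below is an arbitrary illustrative choice):

Z0 <- as.im(function(x,y) { sin(10*x) + sin(9*y) }, letterR)
sig <- as.im(function(x,y) { 0.05 + 0.02 * x }, letterR)   # spatially varying diffusion rate
Zvar <- blurHeat(Z0, sig)
plot(solist(original=Z0, variable=Zvar), main="")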
} \examples{ Z <- as.im(function(x,y) { sin(10*x) + sin(9*y) }, letterR) ZZ <- blurHeat(Z, 0.2) plot(solist(original=Z, blurred=ZZ), main="") } \keyword{spatial} \keyword{math} spatstat.explore/man/Jmulti.inhom.Rd0000644000176200001440000001047014611073323017213 0ustar liggesusers\name{Jmulti.inhom} \alias{Jmulti.inhom} \title{ Inhomogeneous Marked J-Function } \description{ For a marked point pattern, estimate the inhomogeneous version of the multitype \eqn{J} function. } \usage{ Jmulti.inhom(X, I, J, lambda = NULL, lambdaI = NULL, lambdaJ = NULL, lambdamin = NULL, \dots, r = NULL, ReferenceMeasureMarkSetI = NULL, ratio = FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous multitype \eqn{J} function \eqn{J_{IJ}(r)}{J[IJ](r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset index specifying the points of \code{X} from which distances are measured, for the inhomogeneous \eqn{G} function. } \item{J}{Subset index specifying the points in \code{X} to which distances are measured, for the inhomogeneous \eqn{G} and \eqn{F} functions. } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process \code{X[I]}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X[I]}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location, } \item{lambdaJ}{ Optional. Values of the estimated intensity of the sub-process \code{X[J]}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X[J]}, a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"dppm"}), or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdamin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{\dots}{ Extra arguments passed to \code{\link[spatstat.geom]{as.mask}} to control the pixel resolution for the computation. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{K} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{ReferenceMeasureMarkSetI}{ Optional. The total measure of the mark set. A positive number. } \item{ratio}{ Logical value indicating whether to save ratio information. } } \details{ This function is the counterpart of \code{\link{Jmulti}} for inhomogeneous patterns. It is computed by evaluating the inhomogeneous \eqn{G} function \code{\link{GmultiInhom}} and the inhomogeneous \eqn{F} function \code{\link{FmultiInhom}} and computing the ratio \eqn{J = (1-G)/(1-F)}. } \value{ Object of class \code{"fv"} containing the estimate of the inhomogeneous multitype \eqn{J} function. } \references{ Cronie, O. and Van Lieshout, M.N.M. (2015) Summary statistics for inhomogeneous marked point processes. 
\emph{Annals of the Institute of Statistical Mathematics} DOI: 10.1007/s10463-015-0515-z } \author{ Jonatan \Gonzalez and \adrian. } \seealso{ \code{\link{Jcross.inhom}}, \code{\link{Jdot.inhom}} for special cases. \code{\link{GmultiInhom}}, \code{\link{FmultiInhom}}, \code{\link{Jmulti}}. } \examples{ X <- rescale(amacrine) I <- (marks(X) == "on") J <- (marks(X) == "off") if(interactive() && require(spatstat.model)) { ## how to do it normally mod <- ppm(X ~ marks * x) lam <- fitted(mod, dataonly=TRUE) lmin <- min(predict(mod)[["off"]]) * 0.9 dd <- NULL } else { ## for package testing lam <- intensity(X)[as.integer(marks(X))] lmin <- intensity(X)[2] * 0.9 dd <- 32 } JM <- Jmulti.inhom(X, I, J, lambda=lam, lambdamin=lmin, dimyx=dd) } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/densityVoronoi.Rd0000644000176200001440000001431314643125461017677 0ustar liggesusers\name{densityVoronoi} \alias{densityVoronoi} \alias{densityVoronoi.ppp} \title{Intensity Estimate of Point Pattern Using Voronoi-Dirichlet Tessellation} \description{ Computes an adaptive estimate of the intensity function of a point pattern using the Dirichlet-Voronoi tessellation. } \usage{ densityVoronoi(X, \dots) \method{densityVoronoi}{ppp}(X, f = 1, \dots, counting=FALSE, fixed=FALSE, nrep = 1, verbose=TRUE) } \arguments{ \item{X}{Point pattern dataset (object of class \code{"ppp"}).} \item{f}{ Fraction (between 0 and 1 inclusive) of the data points that will be used to build a tessellation for the intensity estimate. } \item{\dots}{Arguments passed to \code{\link[spatstat.geom]{as.im}} determining the pixel resolution of the result. } \item{counting}{ Logical value specifying the choice of estimation method. See Details. } \item{fixed}{ Logical. If \code{FALSE} (the default), the data points are independently randomly thinned, so the number of data points that are retained is random. If \code{TRUE}, the number of data points retained is fixed. See Details. } \item{nrep}{Number of independent repetitions of the randomised procedure.} \item{verbose}{Logical value indicating whether to print progress reports.} } \details{ This function is an alternative to \code{\link{density.ppp}}. It computes an estimate of the intensity function of a point pattern dataset. The result is a pixel image giving the estimated intensity. If \code{f=1} (the default), the Voronoi estimate (Barr and Schoenberg, 2010) is computed: the point pattern \code{X} is used to construct a Voronoi/Dirichlet tessellation (see \code{\link[spatstat.geom]{dirichlet}}); the areas of the Dirichlet tiles are computed; the estimated intensity in each tile is the reciprocal of the tile area. The result is a pixel image of intensity estimates which are constant on each tile of the tessellation. If \code{f=0}, the intensity estimate at every location is equal to the average intensity (number of points divided by window area). The result is a pixel image of intensity estimates which are constant. If \code{f} is strictly between 0 and 1, the estimation method is applied to a random subset of \code{X}. This randomised procedure is repeated \code{nrep} times, and the results are averaged. The subset is selected as follows: \itemize{ \item if \code{fixed=FALSE}, the dataset \code{X} is randomly thinned by deleting or retaining each point independently, with probability \code{f} of retaining a point. 
\item if \code{fixed=TRUE}, a random sample of fixed size \code{m} is taken from the dataset \code{X}, where \code{m} is the largest integer less than or equal to \code{f*n} and \code{n} is the number of points in \code{X}. } Then the intensity estimate is calculated as follows: \itemize{ \item if \code{counting = FALSE} (the default), the thinned pattern is used to construct a Dirichlet tessellation and form the Voronoi estimate (Barr and Schoenberg, 2010) which is then adjusted by a factor \code{1/f} or \code{n/m} as appropriate, to obtain an estimate of the intensity of \code{X} in the tile. \item if \code{counting = TRUE}, the randomly selected subset \code{A} is used to construct a Dirichlet tessellation, while the complementary subset \code{B} (consisting of points that were not selected in the sample) is used for counting to calculate a quadrat count estimate of intensity. For each tile of the Dirichlet tessellation formed by \code{A}, we count the number of points of \code{B} falling in the tile, and divide by the area of the same tile, to obtain an estimate of the intensity of the pattern \code{B} in the tile. This estimate is adjusted by \code{1/(1-f)} or \code{n/(n-m)} as appropriate to obtain an estimate of the intensity of \code{X} in the tile. } Ogata et al. (2003) and Ogata (2004) estimated intensity using the Dirichlet-Voronoi tessellation in a modelling context. Baddeley (2007) proposed intensity estimation by subsampling with \code{0 < f < 1}, and used the technique described above with \code{fixed=TRUE} and \code{counting=TRUE}. Barr and Schoenberg (2010) described and analysed the Voronoi estimator (corresponding to \code{f=1}). Moradi et al (2019) developed the subsampling technique with \code{fixed=FALSE} and \code{counting=FALSE} and called it the \emph{smoothed Voronoi estimator}. } \value{ A pixel image (object of class \code{"im"}) whose values are estimates of the intensity of \code{X}. } \seealso{ \code{\link{adaptive.density}}, \code{\link{density.ppp}}, \code{\link[spatstat.geom]{dirichlet}}, \code{\link[spatstat.geom]{im.object}}. } \references{ Baddeley, A. (2007) Validation of statistical models for spatial point patterns. In J.G. Babu and E.D. Feigelson (eds.) \emph{SCMA IV: Statistical Challenges in Modern Astronomy IV}, volume 317 of Astronomical Society of the Pacific Conference Series, San Francisco, California USA, 2007. Pages 22--38. Barr, C., and Schoenberg, F.P. (2010). On the Voronoi estimator for the intensity of an inhomogeneous planar Poisson process. \emph{Biometrika} \bold{97} (4), 977--984. Moradi, M., Cronie, O., Rubak, E., Lachieze-Rey, R., Mateu, J. and Baddeley, A. (2019) Resample-smoothing of Voronoi intensity estimators. \emph{Statistics and Computing} \bold{29} (5) 995--1010. Ogata, Y. (2004) Space-time model for regional seismicity and detection of crustal stress changes. \emph{Journal of Geophysical Research}, \bold{109}, 2004. Ogata, Y., Katsura, K. and Tanemura, M. (2003). Modelling heterogeneous space-time occurrences of earthquakes and its residual analysis. \emph{Applied Statistics} \bold{52} 499--509. } \examples{ plot(densityVoronoi(nztrees, 1, f=1), main="Voronoi estimate") nr <- if(interactive()) 100 else 5 plot(densityVoronoi(nztrees, f=0.5, nrep=nr), main="smoothed Voronoi estimate") } \author{ \spatstatAuthors and \mehdi.
} \keyword{spatial} \keyword{methods} \keyword{smooth} \concept{Adaptive smoothing} spatstat.explore/man/compatible.fasp.Rd0000644000176200001440000000215014643125461017707 0ustar liggesusers\name{compatible.fasp} \alias{compatible.fasp} \title{Test Whether Function Arrays Are Compatible} \description{ Tests whether two or more function arrays (class \code{"fasp"}) are compatible. } \usage{ \method{compatible}{fasp}(A, B, \dots) } \arguments{ \item{A,B,\dots}{Two or more function arrays (object of class \code{"fasp"}).} } \details{ An object of class \code{"fasp"} can be regarded as an array of functions. Such objects are returned by the command \code{\link[spatstat.explore]{alltypes}}. This command tests whether such objects are compatible (so that, for example, they could be added or subtracted). It is a method for the generic command \code{\link[spatstat.geom]{compatible}}. The function arrays are compatible if the arrays have the same dimensions, and the corresponding elements in each cell of the array are compatible as defined by \code{\link{compatible.fv}}. } \value{ Logical value: \code{TRUE} if the objects are compatible, and \code{FALSE} if they are not. } \seealso{ \code{\link{eval.fasp}} } \author{\adrian and \rolf} \keyword{spatial} \keyword{manip} spatstat.explore/man/SpatialQuantile.Rd0000644000176200001440000000206214611073325017736 0ustar liggesusers\name{SpatialQuantile} \alias{SpatialMedian} \alias{SpatialQuantile} \title{ Spatially Weighted Median or Quantile } \description{ Compute a weighted median or weighted quantile of spatial data. } \usage{ SpatialMedian(X, \dots) SpatialQuantile(X, prob = 0.5, \dots) } \arguments{ \item{X}{ A spatial data object. } \item{prob}{ Probability for which the quantile is required. A single numeric value between 0 and 1. Default is to calculate the median. } \item{\dots}{ Further arguments passed to methods. } } \details{ The functions \code{SpatialMedian} and \code{SpatialQuantile} are generic. They calculate spatially weighted medians and quantiles of spatial data. The details depend on the class of \code{X}. There are methods for spatial point patterns (class \code{"ppp"}) and possibly for other objects. } \author{ \adrian. } \seealso{ Methods \code{\link{SpatialMedian.ppp}}, \code{\link{SpatialQuantile.ppp}}. \code{\link{Smooth}} for the spatially weighted average. } \keyword{spatial} \keyword{smooth} spatstat.explore/man/Kmark.Rd0000644000176200001440000001424514643125461015715 0ustar liggesusers\name{Kmark} \alias{Kmark} \alias{markcorrint} \title{Mark-Weighted K Function} \description{ Estimates the mark-weighted \eqn{K} function of a marked point pattern. } \usage{ Kmark(X, f = NULL, r = NULL, correction = c("isotropic", "Ripley", "translate"), ..., f1 = NULL, normalise = TRUE, returnL = FALSE, fargs = NULL) markcorrint(X, f = NULL, r = NULL, correction = c("isotropic", "Ripley", "translate"), ..., f1 = NULL, normalise = TRUE, returnL = FALSE, fargs = NULL) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link[spatstat.geom]{as.ppp}}. } \item{f}{Optional. Test function \eqn{f} used in the definition of the mark correlation function. An \R function with at least two arguments. There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} should be evaluated. There is a sensible default. 
} \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. Alternatively \code{correction="all"} selects all options. } \item{\dots}{ Ignored. } \item{f1}{ An alternative to \code{f}. If this argument is given, then \eqn{f} is assumed to take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)}. } \item{normalise}{ If \code{normalise=FALSE}, compute only the numerator of the expression for the mark correlation. } \item{returnL}{ Compute the analogue of the K-function if \code{returnL=FALSE} or the analogue of the L-function if \code{returnL=TRUE}. } \item{fargs}{ Optional. A list of extra arguments to be passed to the function \code{f} or \code{f1}. } } \details{ The functions \code{Kmark} and \code{markcorrint} are identical. (Eventually \code{markcorrint} will be deprecated.) The \emph{mark-weighted \eqn{K} function} \eqn{K_f(r)}{K[f](r)} of a marked point process (Penttinen et al, 1992) is a generalisation of Ripley's \eqn{K} function, in which the contribution from each pair of points is weighted by a function of their marks. If the marks of the two points are \eqn{m_1, m_2}{m1, m2} then the weight is proportional to \eqn{f(m_1, m_2)}{f(m1, m2)} where \eqn{f} is a specified \emph{test function}. The mark-weighted \eqn{K} function is defined so that \deqn{ \lambda K_f(r) = \frac{C_f(r)}{E[ f(M_1, M_2) ]} }{ lambda * K_f(r) = C[f](r)/E[f(M1, M2)] } where \deqn{ C_f(r) = E \left[ \sum_{x \in X} f(m(u), m(x)) 1{0 < ||u - x|| \le r} \; \big| \; u \in X \right] }{ C[f](r) = E[ sum[x in X] f(m(u), m(x)) 1(0 < d(u,x) <= r) | u in X] } for any spatial location \eqn{u} taken to be a typical point of the point process \eqn{X}. Here \eqn{||u-x||}{d(u,x)} is the euclidean distance between \eqn{u} and \eqn{x}, so that the sum is taken over all random points \eqn{x} that lie within a distance \eqn{r} of the point \eqn{u}. The function \eqn{C_f(r)}{C[f](r)} is the \emph{unnormalised} mark-weighted \eqn{K} function. To obtain \eqn{K_f(r)}{K[f](r)} we standardise \eqn{C_f(r)}{C[f](r)} by dividing by \eqn{E[f(M_1,M_2)]}{E[f(M1,M2)]}, the expected value of \eqn{f(M_1,M_2)}{f(M1,M2)} when \eqn{M_1}{M1} and \eqn{M_2}{M2} are independent random marks with the same distribution as the marks in the point process. Under the hypothesis of random labelling, the mark-weighted \eqn{K} function is equal to Ripley's \eqn{K} function, \eqn{K_f(r) = K(r)}{K[f](r) = K(r)}. The mark-weighted \eqn{K} function is sometimes called the \emph{mark correlation integral} because it is related to the mark correlation function \eqn{k_f(r)}{k[f](r)} and the pair correlation function \eqn{g(r)} by \deqn{ K_f(r) = 2 \pi \int_0^r s k_f(s) \, g(s) \, {\rm d}s }{ K[f](r) = 2 * pi * integral[0,r] (s * k[f](s) * g(s) ) ds } See \code{\link{markcorr}} for a definition of the mark correlation function. Given a marked point pattern \code{X}, this command computes edge-corrected estimates of the mark-weighted \eqn{K} function. If \code{returnL=FALSE} then the estimated function \eqn{K_f(r)}{K[f](r)} is returned; otherwise the function \deqn{ L_f(r) = \sqrt{K_f(r)/\pi} }{ L[f](r) = sqrt(K[f](r)/pi) } is returned. } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). 
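For illustration, a hedged sketch (the data and options are arbitrary choices) of the multitype case, using an equality test function supplied through \code{f} and returning the L-type transform:

KM <- Kmark(amacrine, f=function(m1, m2) { m1 == m2 }, returnL=TRUE)
plot(KM)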
Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark correlation integral \eqn{K_f(r)}{K[f](r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_f(r)}{K[f](r)} when the marks attached to different points are independent, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the mark-weighted \eqn{K} function \eqn{K_f(r)}{K[f](r)} obtained by the edge corrections named (if \code{returnL=FALSE}). } \references{ Penttinen, A., Stoyan, D. and Henttonen, H. M. (1992) Marked point processes in forest statistics. \emph{Forest Science} \bold{38} (1992) 806-824. Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008) \emph{Statistical analysis and modelling of spatial point patterns}. Chichester: John Wiley. } \seealso{ \code{\link{markcorr}} to estimate the mark correlation function. } \examples{ # CONTINUOUS-VALUED MARKS: # (1) Spruces # marks represent tree diameter # mark correlation function ms <- Kmark(spruces) plot(ms) # (2) simulated data with independent marks X <- rpoispp(100) X <- X \%mark\% runif(npoints(X)) Xc <- Kmark(X) plot(Xc) # MULTITYPE DATA: # Hughes' amacrine data # Cells marked as 'on'/'off' M <- Kmark(amacrine, function(m1,m2) {m1==m2}, correction="translate") plot(M) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/Emark.Rd0000644000176200001440000001431614643125461015706 0ustar liggesusers\name{Emark} \alias{Emark} \alias{Vmark} \title{ Diagnostics for random marking } \description{ Estimate the summary functions \eqn{E(r)} and \eqn{V(r)} for a marked point pattern, proposed by Schlather et al (2004) as diagnostics for dependence between the points and the marks. } \usage{ Emark(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, normalise=FALSE) Vmark(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, normalise=FALSE) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link[spatstat.geom]{as.ppp}}. The pattern should have numeric marks. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the function \eqn{E(r)} or \eqn{V(r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{normalise}{ If\code{TRUE}, normalise the estimate of \eqn{E(r)} or \eqn{V(r)} so that it would have value equal to 1 if the marks are independent of the points. } } \value{ If \code{marks(X)} is a numeric vector, the result is an object of class \code{"fv"} (see \code{\link{fv.object}}). If \code{marks(X)} is a data frame, the result is a list of objects of class \code{"fv"}, one for each column of marks. 
An object of class \code{"fv"} is essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{E(r)} or \eqn{V(r)} has been estimated } \item{theo}{the theoretical, constant value of \eqn{E(r)} or \eqn{V(r)} when the marks attached to different points are independent } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{E(r)} or \eqn{V(r)} obtained by the edge corrections named. } \details{ For a marked point process, Schlather et al (2004) defined the functions \eqn{E(r)} and \eqn{V(r)} to be the conditional mean and conditional variance of the mark attached to a typical random point, given that there exists another random point at a distance \eqn{r} away from it. More formally, \deqn{ E(r) = E_{0u}[M(0)] }{ E(r) = E[0u] M(0) } and \deqn{ V(r) = E_{0u}[(M(0)-E(u))^2] }{ V(r) = E[0u]((M(0)-E(u))^2) } where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation given that there are points of the process at the locations \eqn{0} and \eqn{u} separated by a distance \eqn{r}, and where \eqn{M(0)} denotes the mark attached to the point \eqn{0}. These functions may serve as diagnostics for dependence between the points and the marks. If the points and marks are independent, then \eqn{E(r)} and \eqn{V(r)} should be constant (not depending on \eqn{r}). See Schlather et al (2004). The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link[spatstat.geom]{as.ppp}}. It must be a marked point pattern with numeric marks. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{k_f(r)}{k[f](r)} is estimated. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{Window(X)}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented only for rectangular and polygonal windows (not for binary masks). } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. } } Note that the estimator assumes the process is stationary (spatially homogeneous). The numerator and denominator of the mark correlation function (in the expression above) are estimated using density estimation techniques. The user can choose between \describe{ \item{\code{"density"}}{ which uses the standard kernel density estimation routine \code{\link{density}}, and works only for evenly-spaced \code{r} values; } \item{\code{"loess"}}{ which uses the function \code{loess} in the package \pkg{modreg}; } \item{\code{"sm"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is extremely slow; } \item{\code{"smrep"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is relatively fast, but may require manual control of the smoothing parameter \code{hmult}. } } } \references{ Schlather, M. and Ribeiro, P. and Diggle, P. (2004) Detecting dependence between marks and locations of marked point processes. \emph{Journal of the Royal Statistical Society, series B} \bold{66} (2004) 79-83. 
} \seealso{ Mark correlation \code{\link{markcorr}}, mark variogram \code{\link{markvario}} for numeric marks. Mark connection function \code{\link{markconnect}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}} for factor-valued marks. } \examples{ plot(Emark(spruces)) E <- Emark(spruces, method="density", kernel="epanechnikov") plot(Vmark(spruces)) plot(Emark(finpines)) V <- Vmark(finpines) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.explore/man/stienen.Rd0000644000176200001440000000414314643125462016312 0ustar liggesusers\name{stienen} \alias{stienen} \alias{stienenSet} \title{ Stienen Diagram } \description{ Draw the Stienen diagram of a point pattern, or compute the region covered by the Stienen diagram. } \usage{ stienen(X, \dots, bg = "grey", border = list(bg = NULL)) stienenSet(X, edge=TRUE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link[spatstat.geom]{plot.ppp}} to control the plot. } \item{bg}{ Fill colour for circles. } \item{border}{ Either a list of arguments passed to \code{\link[spatstat.geom]{plot.ppp}} to control the display of circles at the border of the diagram, or the value \code{FALSE} indicating that the border circles should not be plotted. } \item{edge}{ Logical value indicating whether to include the circles at the border of the diagram. } } \details{ The Stienen diagram of a point pattern (Stienen, 1982) is formed by drawing a circle around each point of the pattern, with diameter equal to the nearest-neighbour distance for that point. These circles do not overlap. If two points are nearest neighbours of each other, then the corresponding circles touch. \code{stienenSet(X)} computes the union of these circles and returns it as a window (object of class \code{"owin"}). \code{stienen(X)} generates a plot of the Stienen diagram of the point pattern \code{X}. By default, circles are shaded in grey if they lie inside the window of \code{X}, and are not shaded otherwise. } \value{ The plotting function \code{stienen} returns \code{NULL}. The return value of \code{stienenSet} is a window (object of class \code{"owin"}). } \references{ Stienen, H. (1982) \emph{Die Vergroeberung von Karbiden in reinen Eisen-Kohlenstoff Staehlen}. Dissertation, RWTH Aachen. 
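% Editor's note: a minimal sketch (not part of the package examples) of
% one use of stienenSet(): measuring how much of the observation window
% is covered by the Stienen discs. area(), Window() and intersect.owin()
% are from spatstat.geom; 'cells' is a built-in data set.
%   S <- stienenSet(cells)
%   area(intersect.owin(S, Window(cells))) / area(Window(cells))
%   # approximate fraction of the window covered by the Stienen discs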
} \seealso{ \code{\link[spatstat.geom]{nndist}}, \code{\link[spatstat.geom]{plot.ppp}} } \examples{ Y <- stienenSet(cells) stienen(redwood) stienen(redwood, border=list(bg=NULL, lwd=2, cols="red")) } \author{\spatstatAuthors.} \keyword{spatial} \keyword{math} \keyword{manip} spatstat.explore/DESCRIPTION0000644000176200001440000001071114737455262015317 0ustar liggesusersPackage: spatstat.explore Version: 3.3-4 Date: 2025-01-08 Title: Exploratory Data Analysis for the 'spatstat' Family Authors@R: c(person("Adrian", "Baddeley", role = c("aut", "cre", "cph"), email = "Adrian.Baddeley@curtin.edu.au", comment = c(ORCID="0000-0001-9499-8382")), person("Rolf", "Turner", role = c("aut", "cph"), email="rolfturner@posteo.net", comment=c(ORCID="0000-0001-5521-5218")), person("Ege", "Rubak", role = c("aut", "cph"), email = "rubak@math.aau.dk", comment=c(ORCID="0000-0002-6675-533X")), person("Kasper", "Klitgaard Berthelsen", role = "ctb"), person("Warick", "Brown", role = "cph"), person("Achmad", "Choiruddin", role = "ctb"), person("Jean-Francois", "Coeurjolly", role = "ctb"), person("Ottmar", "Cronie", role = "ctb"), person("Tilman", "Davies", role = c("ctb", "cph")), person("Julian", "Gilbey", role = "ctb"), person("Jonatan", "Gonzalez", role = "ctb"), person("Yongtao", "Guan", role = "ctb"), person("Ute", "Hahn", role = "ctb"), person("Kassel", "Hingee", role = c("ctb", "cph")), person("Abdollah", "Jalilian", role = "ctb"), person("Frederic", "Lavancier", role = "ctb"), person("Marie-Colette", "van Lieshout", role = c("ctb", "cph")), person("Greg", "McSwiggan", role = "ctb"), person("Robin K", "Milne", role = "cph"), person("Tuomas", "Rajala", role = "ctb"), person("Suman", "Rakshit", role = c("ctb", "cph")), person("Dominic", "Schuhmacher", role = "ctb"), person("Rasmus", "Plenge Waagepetersen", role = "ctb"), person("Hangsheng", "Wang", role = "ctb")) Maintainer: Adrian Baddeley Depends: R (>= 3.5.0), spatstat.data (>= 3.1-2), spatstat.univar (>= 3.0-0), spatstat.geom (>= 3.3-2), spatstat.random (>= 3.3-1), stats, graphics, grDevices, utils, methods, nlme Imports: spatstat.utils (>= 3.1-0), spatstat.sparse (>= 3.1-0), goftest (>= 1.2-2), Matrix, abind Suggests: sm, gsl, locfit, spatial, fftwtools (>= 0.9-8), spatstat.linnet (>= 3.2-1), spatstat.model (>= 3.3-1), spatstat (>= 3.1-1) Description: Functionality for exploratory data analysis and nonparametric analysis of spatial data, mainly spatial point patterns, in the 'spatstat' family of packages. (Excludes analysis of spatial data on a linear network, which is covered by the separate package 'spatstat.linnet'.) Methods include quadrat counts, K-functions and their simulation envelopes, nearest neighbour distance and empty space statistics, Fry plots, pair correlation function, kernel smoothed intensity, relative risk estimation with cross-validated bandwidth selection, mark correlation functions, segregation indices, mark dependence diagnostics, and kernel estimates of covariate effects. Formal hypothesis tests of random pattern (chi-squared, Kolmogorov-Smirnov, Monte Carlo, Diggle-Cressie-Loosmore-Ford, Dao-Genton, two-stage Monte Carlo) and tests for covariate effects (Cox-Berman-Waller-Lawson, Kolmogorov-Smirnov, ANOVA) are also supported. 
License: GPL (>= 2)
URL: http://spatstat.org/
NeedsCompilation: yes
ByteCompile: true
BugReports: https://github.com/spatstat/spatstat.explore/issues
Packaged: 2025-01-08 09:42:39 UTC; adrian
Author: Adrian Baddeley [aut, cre, cph]
    (<https://orcid.org/0000-0001-9499-8382>),
  Rolf Turner [aut, cph] (<https://orcid.org/0000-0001-5521-5218>),
  Ege Rubak [aut, cph] (<https://orcid.org/0000-0002-6675-533X>),
  Kasper Klitgaard Berthelsen [ctb],
  Warick Brown [cph],
  Achmad Choiruddin [ctb],
  Jean-Francois Coeurjolly [ctb],
  Ottmar Cronie [ctb],
  Tilman Davies [ctb, cph],
  Julian Gilbey [ctb],
  Jonatan Gonzalez [ctb],
  Yongtao Guan [ctb],
  Ute Hahn [ctb],
  Kassel Hingee [ctb, cph],
  Abdollah Jalilian [ctb],
  Frederic Lavancier [ctb],
  Marie-Colette van Lieshout [ctb, cph],
  Greg McSwiggan [ctb],
  Robin K Milne [cph],
  Tuomas Rajala [ctb],
  Suman Rakshit [ctb, cph],
  Dominic Schuhmacher [ctb],
  Rasmus Plenge Waagepetersen [ctb],
  Hangsheng Wang [ctb]
Repository: CRAN
Date/Publication: 2025-01-08 11:00:02 UTC
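Editor's note: the Description field above lists the package's main
exploratory tools. The following is an informal sketch (not part of the
package sources) of a typical session using a few of the functions it
mentions; it assumes the spatstat family of packages is installed, and
uses the built-in 'redwood' point pattern from spatstat.data.

    library(spatstat.explore)              # also attaches spatstat.geom, spatstat.random, ...
    X <- redwood                           # built-in point pattern
    quadrat.test(X, nx = 3)                # chi-squared test of complete spatial randomness
    plot(envelope(X, Kest, nsim = 39))     # K-function with pointwise simulation envelopes
    plot(density(X, sigma = bw.diggle(X))) # kernel intensity, cross-validated bandwidth
    plot(pcf(X))                           # pair correlation function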