spatstat.linnet/0000755000176200001440000000000014155176075013420 5ustar liggesusersspatstat.linnet/NAMESPACE0000644000176200001440000003230314155072546014636 0ustar liggesusers# spatstat.linnet NAMESPACE file ## ................ Import packages .................. import(stats,graphics,grDevices,methods,utils) import(spatstat.utils) import(spatstat.data,spatstat.geom,spatstat.core) import(Matrix,spatstat.sparse) ## ................ Load dynamic library .............. ## (native routines are registered in init.c) ## (entry points are symbols with prefix "SL_") useDynLib(spatstat.linnet, .registration=TRUE, .fixes="SL_") ## ////////// DO NOT EDIT THE FOLLOWING /////////////// ## //////// it is generated automatically ///////////// # .................................................. # Automatically-generated list of documented objects # .................................................. export("addVertices") export("affine.linim") export("affine.linnet") export("affine.lpp") export("anova.lppm") export("ApplyConnected") export("as.data.frame.linfun") export("as.data.frame.linim") export("as.data.frame.lintess") export("as.function.linfun") export("as.im.linim") export("as.linfun") export("as.linfun.linfun") export("as.linfun.linim") export("as.linfun.lintess") export("as.linim") export("as.linim.default") export("as.linim.linfun") export("as.linim.linim") export("as.linnet") export("as.linnet.linfun") export("as.linnet.linim") export("as.linnet.linnet") export("as.linnet.lintess") export("as.linnet.lpp") export("as.linnet.lppm") export("as.linnet.psp") export("as.lpp") export("as.owin.linfun") export("as.owin.linnet") export("as.owin.lintess") export("as.owin.lpp") export("as.owin.lppm") export("as.ppm.lppm") export("as.ppp.lpp") export("as.psp.linnet") export("as.psp.lpp") export("auc.lpp") export("auc.lppm") export("begins") export("berman.test.lpp") export("berman.test.lppm") export("boundingradius.linnet") export("branchlabelfun") export("bw.lppl") export("bw.relrisklpp") export("bw.voronoi") export("cdf.test.lpp") export("cdf.test.lppm") export("chop.linnet") export("circumradius.linnet") export("clickjoin") export("clicklpp") export("coef.lppm") export("Complex.linim") export("Complex.linimlist") export("connected.linnet") export("connected.lpp") export("countends") export("crossdist.lpp") export("crossing.linnet") export("cut.lpp") export("data.lppm") export("default.linnet.tolerance") export("delaunayNetwork") export("deletebranch") export("deletebranch.linnet") export("deletebranch.lpp") export("densityEqualSplit") export("densityfun.lpp") export("densityHeat.lpp") export("density.linnet") export("density.lpp") export("densitypointsLPP") export("densityQuick.lpp") export("density.splitppx") export("densityVoronoi.lpp") export("deviance.lppm") export("diameter.linnet") export("dirichletNetwork") export("distfun.lpp") export("divide.linnet") export("DoCountCrossEnds") export("DoCountEnds") export("domain.linfun") export("domain.lintess") export("domain.lpp") export("domain.lppm") export("emend.lppm") export("envelope.lpp") export("envelope.lppm") export("evalCovar.lppm") export("eval.linim") export("extractAIC.lppm") export("extractbranch") export("extractbranch.linnet") export("extractbranch.lpp") export("FDMKERNEL") export("fitted.lppm") export("flatdensityatpointslpp") export("flatdensityfunlpp") export("formula.lppm") export("getlambda.lpp") export("heatkernelapprox") export("identify.lpp") export("insertVertices") export("integral.linfun") export("integral.linim") 
export("intensity.lpp") export("intersect.lintess") export("is.connected.linnet") export("is.marked.lppm") export("is.multitype.lpp") export("is.multitype.lppm") export("is.poisson.lppm") export("is.stationary.lppm") export("joinVertices") export("ldtEngine") export("lineardirichlet") export("lineardisc") export("linearK") export("linearKcross") export("linearKcross.inhom") export("linearKdot") export("linearKdot.inhom") export("linearKengine") export("linearKinhom") export("linearKmulti") export("linearKmultiEngine") export("linearKmulti.inhom") export("linearmarkconnect") export("linearmarkequal") export("linearpcf") export("linearpcfcross") export("linearpcfcross.inhom") export("linearpcfdot") export("linearpcfdot.inhom") export("linearpcfengine") export("linearpcfinhom") export("linearpcfmulti") export("linearPCFmultiEngine") export("linearpcfmulti.inhom") export("lineartileindex") export("linequad") export("linfun") export("[.linim") export("[<-.linim") export("linim") export("LinimListOp") export("LinimOp") export("[.linnet") export("linnet") export("lintess") export("lixellate") export("local2lpp") export("logLik.lppm") export("looHeatLPP") export("looVoronoiLPP") export("[.lpp") export("lpp") export("lppm") export("lppm.formula") export("lppm.lpp") export("makeLinnetTolerance") export("marks.lintess") export("marks<-.lintess") export("marks<-.lpp") export("Math.linim") export("Math.linimlist") export("mean.linim") export("median.linim") export("model.frame.lppm") export("model.images.lppm") export("model.matrix.lppm") export("nncross.lpp") export("nndist.lpp") export("nnfromvertex") export("nnfun.lpp") export("nnwhich.lpp") export("nobjects.lintess") export("nobs.lppm") export("nsegments.linnet") export("nsegments.lpp") export("nvertices.linnet") export("Ops.linim") export("Ops.linimlist") export("pairdist.lpp") export("pairs.linim") export("persp.linfun") export("persp.linim") export("pixellate.linnet") export("plot.linfun") export("plot.linim") export("plot.linnet") export("plot.lintess") export("plot.lpp") export("plot.lppm") export("pointsAlongNetwork") export("points.lpp") export("predict.lppm") export("print.linfun") export("print.linim") export("print.linnet") export("print.lintess") export("print.lpp") export("print.lppm") export("print.summary.linim") export("print.summary.linnet") export("print.summary.lintess") export("print.summary.lpp") export("pseudoR2.lppm") export("qkdeEngine") export("quantile.linim") export("rcelllpp") export("relrisk.lpp") export("repairNetwork") export("resampleNetworkDataFrame") export("rescale.linnet") export("rescale.lpp") export("resolve.heat.steps") export("response.lppm") export("rhohat.lpp") export("rhohat.lppm") export("rjitter.lpp") export("rjitterlpp") export("rlpp") export("roc.lpp") export("roc.lppm") export("rotate.linnet") export("rotate.lpp") export("rpoislpp") export("rSwitzerlpp") export("runiflpp") export("scalardilate.linim") export("scalardilate.linnet") export("scalardilate.lpp") export("sdr.lpp") export("shift.linim") export("shift.linnet") export("shift.lpp") export("simulate.lppm") export("sortalongsegment") export("subset.lpp") export("summary.linfun") export("summary.linim") export("Summary.linim") export("Summary.linimlist") export("summary.linnet") export("summary.lintess") export("summary.lpp") export("summary.lppm") export("superimpose.lpp") export("terms.lppm") export("text.lpp") export("thinNetwork") export("tile.lengths") export("tilenames.lintess") export("tilenames<-.lintess") export("treebranchlabels") 
export("treeprune") export("unitname.linnet") export("unitname<-.linnet") export("unitname.lpp") export("unitname<-.lpp") export("unmark.lintess") export("unmark.lpp") export("unstack.lintess") export("unstack.lpp") export("update.lppm") export("validate.lpp.coords") export("valid.lppm") export("vcov.lppm") export("vertexdegree") export("vertices.linnet") export("vnnFind") export("volume.linnet") export("Window.linnet") export("Window<-.linnet") export("Window.lintess") export("Window.lpp") export("Window<-.lpp") export("Window.lppm") # ....... Special cases ........... S3method("Complex", "linim") S3method("Complex", "linimlist") S3method("Math", "linim") S3method("Math", "linimlist") S3method("Ops", "linim") S3method("Ops", "linimlist") S3method("Summary", "linim") S3method("Summary", "linimlist") # ....... End of special cases ... # ......................................... # Automatically generated list of S3 methods # ......................................... S3method("affine", "linim") S3method("affine", "linnet") S3method("affine", "lpp") S3method("anova", "lppm") S3method("as.data.frame", "linfun") S3method("as.data.frame", "linim") S3method("as.data.frame", "lintess") S3method("as.function", "linfun") S3method("as.im", "linim") S3method("as.linfun", "linfun") S3method("as.linfun", "linim") S3method("as.linfun", "lintess") S3method("as.linim", "default") S3method("as.linim", "linfun") S3method("as.linim", "linim") S3method("as.linnet", "linfun") S3method("as.linnet", "linim") S3method("as.linnet", "linnet") S3method("as.linnet", "lintess") S3method("as.linnet", "lpp") S3method("as.linnet", "lppm") S3method("as.linnet", "psp") S3method("as.owin", "linfun") S3method("as.owin", "linnet") S3method("as.owin", "lintess") S3method("as.owin", "lpp") S3method("as.owin", "lppm") S3method("as.ppm", "lppm") S3method("as.ppp", "lpp") S3method("as.psp", "linnet") S3method("as.psp", "lpp") S3method("auc", "lpp") S3method("auc", "lppm") S3method("berman.test", "lpp") S3method("berman.test", "lppm") S3method("boundingradius", "linnet") S3method("cdf.test", "lpp") S3method("cdf.test", "lppm") S3method("circumradius", "linnet") S3method("coef", "lppm") S3method("connected", "linnet") S3method("connected", "lpp") S3method("crossdist", "lpp") S3method("cut", "lpp") S3method("deletebranch", "linnet") S3method("deletebranch", "lpp") S3method("densityfun", "lpp") S3method("densityHeat", "lpp") S3method("density", "linnet") S3method("density", "lpp") S3method("density", "splitppx") S3method("densityVoronoi", "lpp") S3method("deviance", "lppm") S3method("diameter", "linnet") S3method("distfun", "lpp") S3method("domain", "linfun") S3method("domain", "lintess") S3method("domain", "lpp") S3method("domain", "lppm") S3method("emend", "lppm") S3method("envelope", "lpp") S3method("envelope", "lppm") S3method("evalCovar", "lppm") S3method("extractAIC", "lppm") S3method("extractbranch", "linnet") S3method("extractbranch", "lpp") S3method("fitted", "lppm") S3method("formula", "lppm") S3method("identify", "lpp") S3method("integral", "linfun") S3method("integral", "linim") S3method("intensity", "lpp") S3method("is.connected", "linnet") S3method("is.marked", "lppm") S3method("is.multitype", "lpp") S3method("is.multitype", "lppm") S3method("is.poisson", "lppm") S3method("is.stationary", "lppm") S3method("[", "linim") S3method("[", "linnet") S3method("logLik", "lppm") S3method("[", "lpp") S3method("lppm", "formula") S3method("lppm", "lpp") S3method("marks", "lintess") S3method("mean", "linim") S3method("median", "linim") 
S3method("model.frame", "lppm") S3method("model.images", "lppm") S3method("model.matrix", "lppm") S3method("nncross", "lpp") S3method("nndist", "lpp") S3method("nnfun", "lpp") S3method("nnwhich", "lpp") S3method("nobjects", "lintess") S3method("nobs", "lppm") S3method("nsegments", "linnet") S3method("nsegments", "lpp") S3method("nvertices", "linnet") S3method("pairdist", "lpp") S3method("pairs", "linim") S3method("persp", "linfun") S3method("persp", "linim") S3method("pixellate", "linnet") S3method("plot", "linfun") S3method("plot", "linim") S3method("plot", "linnet") S3method("plot", "lintess") S3method("plot", "lpp") S3method("plot", "lppm") S3method("points", "lpp") S3method("predict", "lppm") S3method("print", "linfun") S3method("print", "linim") S3method("print", "linnet") S3method("print", "lintess") S3method("print", "lpp") S3method("print", "lppm") S3method("print", "summary.linim") S3method("print", "summary.linnet") S3method("print", "summary.lintess") S3method("print", "summary.lpp") S3method("pseudoR2", "lppm") S3method("quantile", "linim") S3method("relrisk", "lpp") S3method("rescale", "linnet") S3method("rescale", "lpp") S3method("response", "lppm") S3method("rhohat", "lpp") S3method("rhohat", "lppm") S3method("rjitter", "lpp") S3method("roc", "lpp") S3method("roc", "lppm") S3method("rotate", "linnet") S3method("rotate", "lpp") S3method("scalardilate", "linim") S3method("scalardilate", "linnet") S3method("scalardilate", "lpp") S3method("sdr", "lpp") S3method("shift", "linim") S3method("shift", "linnet") S3method("shift", "lpp") S3method("simulate", "lppm") S3method("subset", "lpp") S3method("summary", "linfun") S3method("summary", "linim") S3method("summary", "linnet") S3method("summary", "lintess") S3method("summary", "lpp") S3method("summary", "lppm") S3method("superimpose", "lpp") S3method("terms", "lppm") S3method("text", "lpp") S3method("tilenames", "lintess") S3method("unitname", "linnet") S3method("unitname", "lpp") S3method("unmark", "lintess") S3method("unmark", "lpp") S3method("unstack", "lintess") S3method("unstack", "lpp") S3method("update", "lppm") S3method("valid", "lppm") S3method("vcov", "lppm") S3method("vertices", "linnet") S3method("volume", "linnet") S3method("Window", "linnet") S3method("Window", "lintess") S3method("Window", "lpp") S3method("Window", "lppm") # ......................................... # Assignment methods # ......................................... S3method("[<-", "linim") S3method("marks<-", "lintess") S3method("marks<-", "lpp") S3method("tilenames<-", "lintess") S3method("unitname<-", "linnet") S3method("unitname<-", "lpp") S3method("Window<-", "linnet") S3method("Window<-", "lpp") # ......................................... # End of methods # ......................................... spatstat.linnet/man/0000755000176200001440000000000014155072546014171 5ustar liggesusersspatstat.linnet/man/nnwhich.lpp.Rd0000644000176200001440000000346714141460471016713 0ustar liggesusers\name{nnwhich.lpp} \alias{nnwhich.lpp} \title{ Identify Nearest Neighbours on a Linear Network } \description{ Given a pattern of points on a linear network, identify the nearest neighbour for each point, measured by the shortest path in the network. } \usage{ \method{nnwhich}{lpp}(X, ..., k=1, method="C") } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{method}{ Optional string determining the method of calculation. Either \code{"interpreted"} or \code{"C"}. } \item{k}{ Integer, or integer vector. 
The algorithm will find the \code{k}th nearest neighbour. } \item{\dots}{ Ignored. } } \details{ Given a pattern of points on a linear network, this function finds the nearest neighbour of each point (i.e. for each point it identifies the nearest other point) measuring distance by the shortest path in the network. If \code{method="C"} the task is performed using code in the C language. If \code{method="interpreted"} then the computation is performed using interpreted \R code. The \R code is much slower, but is provided for checking purposes. The result is \code{NA} if the \code{k}th nearest neighbour does not exist. This can occur if there are fewer than \code{k+1} points in the dataset, or if the linear network is not connected. } \value{ An integer vector, of length equal to the number of points in \code{X}, identifying the nearest neighbour of each point. If \code{nnwhich(X)[2] = 4} then the nearest neighbour of point 2 is point 4. Alternatively a matrix with one row for each point in \code{X} and one column for each entry of \code{k}. } \author{ \adrian } \seealso{ \code{\link{lpp}} } \examples{ X <- runiflpp(10, simplenet) nnwhich(X) nnwhich(X, k=2) } \keyword{spatial} spatstat.linnet/man/thinNetwork.Rd0000644000176200001440000000521514141460471016770 0ustar liggesusers\name{thinNetwork} \alias{thinNetwork} \title{ Remove Vertices or Segments from a Linear Network } \description{ Delete some vertices and/or segments from a linear network or related object. } \usage{ thinNetwork(X, retainvertices, retainedges) } \arguments{ \item{X}{ A linear network (object of class \code{"linnet"}), or a point pattern on a linear network (object of class \code{"lpp"}). } \item{retainvertices}{ Optional. Subset index specifying which vertices should be retained (not deleted). } \item{retainedges}{ Optional. Subset index specifying which edges (segments) should be retained (not deleted). } } \details{ This function deletes some of the vertices and edges (segments) in the linear network. The arguments \code{retainvertices} and \code{retainedges} can be any kind of subset index: a vector of positive integers specifying which vertices/edges should be retained; a vector of negative integers specifying which vertices/edges should be deleted; or a logical vector specifying whether each vertex/edge should be retained (\code{TRUE}) or deleted (\code{FALSE}). Vertices are indexed in the same sequence as in \code{vertices(as.linnet(X))}. Segments are indexed in the same sequence as in \code{as.psp(as.linnet(X))}. The argument \code{retainedges} has higher precedence than \code{retainvertices} in the sense that: \itemize{ \item If \code{retainedges} is given, then any vertex which is an endpoint of a retained edge will also be retained. \item If \code{retainvertices} is given and \code{retainedges} is \bold{missing}, then any segment joining two retained vertices will also be retained. \item Thus, when both \code{retainvertices} and \code{retainedges} are given, it is possible that more vertices will be retained than those specified by \code{retainvertices}. } After the network has been altered, other consequential changes will occur, including renumbering of the segments and vertices. If \code{X} is a point pattern on a linear network, then data points will be deleted if they lie on a deleted edge. } \value{ An object of the same kind as \code{X}. } \author{ \adrian and Suman Rakshit. } \seealso{ \code{\link{linnet}} to make a network; \code{\link{connected.linnet}} to extract connected components. 
\code{\link{repairNetwork}}. } \examples{ L <- simplenet plot(L, main="thinNetwork(L, retainedges=c(-3, -5))") text(midpoints.psp(as.psp(L)), labels=1:nsegments(L), pos=3) Lsub <- thinNetwork(L, retainedges=c(-3, -5)) plot(Lsub, add=TRUE, col="blue", lwd=2) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/densityHeat.lpp.Rd0000644000176200001440000001433314141460471017530 0ustar liggesusers\name{densityHeat.lpp} \alias{densityHeat.lpp} \title{ Kernel Density on a Network using Heat Equation } \description{ Given a point pattern on a linear network, compute a kernel estimate of intensity, by solving the heat equation. } \usage{ \method{densityHeat}{lpp}(x, sigma, \dots, at=c("pixels", "points"), leaveoneout=TRUE, weights = NULL, dx = NULL, dt = NULL, iterMax = 1e+06, finespacing = TRUE, verbose=FALSE) } \arguments{ \item{x}{ Point pattern on a linear network (object of class \code{"lpp"}) to be smoothed. } \item{sigma}{ Smoothing bandwidth (standard deviation of the kernel) in the same units as the spatial coordinates of \code{x}. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} determining the resolution of the result. (Any other arguments are ignored.) } \item{at}{ String specifying whether to compute the intensity values at a fine grid of pixel locations on the network (\code{at="pixels"}, the default) or only at the data points of \code{x} (\code{at="points"}). } \item{leaveoneout}{ Logical value indicating whether to compute a leave-one-out estimator. Applicable only when \code{at="points"}. } \item{weights}{ Optional. Numeric vector of weights associated with the points of \code{x}. Weights may be positive, negative or zero. } \item{dx}{ Optional. Spacing of the sampling points along the network. A single number giving a distance value in the same units as \code{x}. } \item{dt}{ Optional. Time step in the heat equation solver. A single number. } \item{iterMax}{ Maximum number of iterations. } \item{finespacing}{ Logical value specifying whether the discrete approximation is required to be accurate along every segment of the network, no matter how short the segment is. See the section on Discretisation. } \item{verbose}{ Logical value specifying whether to print progress reports. } } \details{ The function \code{\link[spatstat.core]{densityHeat}} is generic. This is the method for the class \code{"lpp"} of points on a linear network. Kernel smoothing is applied to the points of \code{x} using a kernel based on path distances in the network. If \code{at="pixels"} (the default), the result is a pixel image on the linear network (class \code{"linim"}) which can be plotted. If \code{at="points"} the result is a numeric vector giving the density estimates at the data points of \code{x}. The smoothing operation is equivalent to the \dQuote{equal-split continuous} rule described in Section 9.2.3 of Okabe and Sugihara (2012). However, the actual computation is performed rapidly, by solving the classical time-dependent heat equation on the network, as described in McSwiggan et al (2016). Computational time is short, but increases quadratically with \code{sigma}. If \code{at="points"} and \code{leaveoneout=TRUE}, a leave-one-out estimate is computed at each data point (that is, the estimate at each data point \code{x[i]} is based on all of the points except \code{x[i]}) using the truncated series approximation of McSwiggan et al (2019). 
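For example, a minimal sketch (reusing the example network \code{simplenet} and a small simulated pattern, as in the Examples below; nothing beyond the arguments documented above is assumed) comparing the leave-one-out and ordinary estimates at the data points:
\preformatted{
  X <- runiflpp(5, simplenet)
  # leave-one-out estimate at each data point
  loo <- densityHeat(X, sigma=0.2, at="points", leaveoneout=TRUE)
  # estimate at each data point using all of the data points
  ord <- densityHeat(X, sigma=0.2, at="points", leaveoneout=FALSE)
}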
} \section{Infinite bandwidth}{ If \code{sigma=Inf}, the resulting density estimate is constant over all locations, and is equal to the average density of points per unit length. (If the network is not connected, then this rule is applied separately to each connected component of the network). } \section{Discretisation and Error Messages}{ The arguments \code{dx}, \code{dt} and \code{iterMax} determine the discretisation of the network, according to a set of rules. The argument \code{finespacing} determines which rule will be applied. The arguments \code{dx}, \code{dt}, \code{iterMax} are connected by several constraints; specifying one of these arguments will affect the default values of the other two arguments. The argument \code{finespacing} specifies whether a very fine spacing of sample points is required, in order to attain high accuracy. \itemize{ \item If \code{finespacing=TRUE} (the default), then the sample point spacing \code{dx} must not exceed one-third of the length of the shortest segment of the network. This ensures that the discrete approximation is accurate along every segment, no matter how short the segment is. However, this may not be feasible if it implies a very large number of sample points, or a large number of iterations: in such cases, the code may terminate with an error about illegal values of \code{dx}, \code{dt} or \code{iterMax}. \item If \code{finespacing=FALSE}, then the sample point spacing \code{dx} will be about one-half the width of a pixel in the default pixellation of the window of \code{x}. This is usually a much coarser resolution than the one selected by \code{finespacing=TRUE}. If it is too coarse, the pixel resolution can be refined using the arguments \code{dimyx}, \code{eps} or \code{xy} passed to \code{\link{as.mask}}. For example, \code{dimyx=512} would specify a 512 x 512 pixel grid. The default pixel resolution can be changed for the remainder of the \R session by \code{\link{spatstat.options}('npixel')}. } } \value{ If \code{at="pixels"} (the default), a pixel image on the linear network (object of class \code{"linim"}). If \code{at="points"}, a numeric vector with one entry for each point of \code{x}. } \references{ McSwiggan, G., Baddeley, A. and Nair, G. (2016) Kernel density estimation on a linear network. \emph{Scandinavian Journal of Statistics} \bold{44}, 324--345. McSwiggan, G., Baddeley, A. and Nair, G. (2019) Estimation of relative risk for events on a linear network. \emph{Statistics and Computing} \bold{30}, 469--484. Okabe, A. and Sugihara, K. (2012) \emph{Spatial analysis along networks}. Wiley. } \author{ \adrian and Greg McSwiggan. } \seealso{ \code{\link{density.lpp}} } \examples{ X <- runiflpp(3, simplenet) D <- densityHeat(X, 0.2) plot(D, style="w", main="", adjust=2) densityHeat.lpp(X, 0.2, at="points") Dw <- densityHeat(X, 0.2, weights=c(1,2,-1)) } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.linnet/man/persp.linfun.Rd0000644000176200001440000000271114141460471017075 0ustar liggesusers\name{persp.linfun} \alias{persp.linfun} \title{ Perspective View of Function on a Linear Network } \description{ Given a function on a linear network, generate a perspective view. } \usage{ \method{persp}{linfun}(x, \dots, main, eps = NULL, dimyx = NULL, xy = NULL) } \arguments{ \item{x}{ The function to be plotted. An object of class \code{"linfun"}. } \item{\dots}{ Arguments passed to \code{\link{persp.linim}} controlling the appearance of the plot. } \item{main}{ Main title for the plot. 
} \item{eps,dimyx,xy}{ Arguments passed to \code{\link[spatstat.linnet]{as.linim}} determining the spatial resolution when the function is converted to an image. } } \details{ The function \code{x} is converted to a pixel image on the linear network using \code{\link[spatstat.linnet]{as.linim}}. Then \code{\link{persp.linim}} is invoked to generate the perspective plot. This style of plot is often attributed to Okabe and Sugihara (2012). } \value{ (Invisibly) the perspective transformation matrix, as described in the help for \code{\link[graphics]{persp.default}}. } \author{ \adrian and Greg McSwiggan. } \seealso{ \code{\link{persp.linim}} } \examples{ f <- linfun(function(x,y,seg,tp) { abs(sin(25*x)) + abs(sin(15*y)) }, simplenet) persp(f, phi=20) } \references{ Okabe, A. and Sugihara, K. (2012) \emph{Spatial Analysis Along Networks}. John Wiley and Sons, New York. } \keyword{spatial} \keyword{hplot} spatstat.linnet/man/linearpcfcross.inhom.Rd0000644000176200001440000001100314141460471020572 0ustar liggesusers\name{linearpcfcross.inhom} \alias{linearpcfcross.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the inhomogeneous multitype pair correlation function from points of type \eqn{i} to points of type \eqn{j}. } \usage{ linearpcfcross.inhom(X, i, j, lambdaI, lambdaJ, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-any pair correlation function \eqn{g_{ij}(r)}{g[ij](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdaJ}{ Intensity values for the points of type \code{j}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). 
} \details{ This is a counterpart of the function \code{\link{pcfcross.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} and \code{j} are missing, they default to the first and second levels of the marks factor, respectively. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. If \code{lambdaI} or \code{lambdaJ} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The arguments \code{i} and \code{j} are interpreted as levels of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfdot}}, \code{\link{linearpcf}}, \code{\link{pcfcross.inhom}}. } \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lamJ <- function(x,y,const=lam[["robbery"]]){ rep(const, length(x)) } g <- linearpcfcross.inhom(chicago, "assault", "robbery", lamI, lamJ) # using fitted models for intensity # fit <- lppm(chicago ~marks + x) # linearpcfcross.inhom(chicago, "assault", "robbery", fit, fit) } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/linnet.Rd0000644000176200001440000000625514141460471015752 0ustar liggesusers\name{linnet} \alias{linnet} \title{ Create a Linear Network } \description{ Creates an object of class \code{"linnet"} representing a network of line segments. } \usage{ linnet(vertices, m, edges, sparse=FALSE, warn=TRUE) } \arguments{ \item{vertices}{ Point pattern (object of class \code{"ppp"}) specifying the vertices of the network. } \item{m}{ Adjacency matrix. A matrix or sparse matrix of logical values equal to \code{TRUE} when the corresponding vertices are joined by a line. (Specify either \code{m} or \code{edges}.) } \item{edges}{ Edge list. A two-column matrix of integers, specifying all pairs of vertices that should be joined by an edge. (Specify either \code{m} or \code{edges}.) } \item{sparse}{ Optional. Logical value indicating whether to use a sparse matrix representation of the network. See Details. } \item{warn}{ Logical value indicating whether to issue a warning if the resulting network is not connected. } } \details{ An object of class \code{"linnet"} represents a network of straight line segments in two dimensions. The function \code{linnet} creates such an object from the minimal information: the spatial location of each vertex (endpoint, crossing point or meeting point of lines) and information about which vertices are joined by an edge. If \code{sparse=FALSE} (the default), the algorithm will compute and store various properties of the network, including the adjacency matrix \code{m} and a matrix giving the shortest-path distances between each pair of vertices in the network. This is more efficient for small datasets. 
However it can require large amounts of memory and can take a long time to execute. If \code{sparse=TRUE}, then the shortest-path distances will not be computed, and the network adjacency matrix \code{m} will be stored as a sparse matrix. This saves a lot of time and memory when creating the linear network. If the argument \code{edges} is given, then it will also determine the \emph{ordering} of the line segments when they are stored or extracted. For example, \code{edges[i,]} corresponds to \code{as.psp(L)[i]}. } \value{ Object of class \code{"linnet"} representing the linear network. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link[spatstat.data]{simplenet}} for an example of a linear network. \code{\link{methods.linnet}} for methods applicable to \code{linnet} objects. Special tools: \code{\link{thinNetwork}}, \code{\link{insertVertices}}, \code{\link{joinVertices}}, \code{\link{connected.linnet}}, \code{\link{lixellate}}. \code{\link{delaunayNetwork}} for the Delaunay triangulation as a network. \code{\link[spatstat.geom]{ppp}}, \code{\link[spatstat.geom]{psp}}. } \examples{ # letter 'A' specified by adjacency matrix v <- ppp(x=(-2):2, y=3*c(0,1,2,1,0), c(-3,3), c(-1,7)) m <- matrix(FALSE, 5,5) for(i in 1:4) m[i,i+1] <- TRUE m[2,4] <- TRUE m <- m | t(m) letterA <- linnet(v, m) plot(letterA) # letter 'A' specified by edge list edg <- cbind(1:4, 2:5) edg <- rbind(edg, c(2,4)) letterA <- linnet(v, edges=edg) } \keyword{spatial} spatstat.linnet/man/plot.lppm.Rd0000644000176200001440000000250614141460471016401 0ustar liggesusers\name{plot.lppm} \alias{plot.lppm} \title{ Plot a Fitted Point Process Model on a Linear Network } \description{ Plots the fitted intensity of a point process model on a linear network. } \usage{ \method{plot}{lppm}(x, ..., type="trend") } \arguments{ \item{x}{ An object of class \code{"lppm"} representing a fitted point process model on a linear network. } \item{\dots}{ Arguments passed to \code{\link{plot.linim}} to control the plot. } \item{type}{ Character string (either \code{"trend"} or \code{"cif"}) determining whether to plot the fitted first order trend or the conditional intensity. } } \details{ This function is the plot method for the class \code{"lppm"}. It computes the fitted intensity of the point process model, and displays it using \code{\link{plot.linim}}. The default is to display intensity values as colours. Alternatively if the argument \code{style="width"} is given, intensity values are displayed as the widths of thick lines drawn over the network. } \value{ Null. } \author{ \adrian } \seealso{ \code{\link{lppm}}, \code{\link{plot.linim}}, \code{\link{methods.lppm}}, \code{\link{predict.lppm}}. } \examples{ X <- runiflpp(10, simplenet) fit <- lppm(X ~x) plot(fit) plot(fit, style="width") } \keyword{spatial} \keyword{models} spatstat.linnet/man/affine.lpp.Rd0000644000176200001440000000512614141460471016477 0ustar liggesusers\name{affine.lpp} \alias{affine.lpp} \alias{shift.lpp} \alias{rotate.lpp} \alias{rescale.lpp} \alias{scalardilate.lpp} \title{Apply Geometrical Transformations to Point Pattern on a Linear Network} \description{ Apply geometrical transformations to a point pattern on a linear network. 
} \usage{ \method{affine}{lpp}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) \method{shift}{lpp}(X, vec=c(0,0), \dots, origin=NULL) \method{rotate}{lpp}(X, angle=pi/2, \dots, centre=NULL) \method{scalardilate}{lpp}(X, f, \dots) \method{rescale}{lpp}(X, s, unitname) } \arguments{ \item{X}{Point pattern on a linear network (object of class \code{"lpp"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{angle}{Rotation angle in radians.} \item{f}{Scalar dilation factor.} \item{s}{ Unit conversion factor: the new units are \code{s} times the old units. } \item{\dots}{ Arguments passed to other methods. } \item{origin}{ Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } \item{unitname}{ Optional. New name for the unit of length. A value acceptable to the function \code{\link{unitname<-}} } } \value{ Another point pattern on a linear network (object of class \code{"lpp"}) representing the result of applying the geometrical transformation. } \details{ These functions are methods for the generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{rescale}} and \code{\link{scalardilate}} applicable to objects of class \code{"lpp"}. All of these functions perform geometrical transformations on the object \code{X}, except for \code{rescale}, which simply rescales the units of length. } \seealso{ \code{\link{lpp}}. Generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{scalardilate}}, \code{\link{rescale}}. } \examples{ X <- rpoislpp(2, simplenet) U <- rotate(X, pi) V <- shift(X, c(0.1, 0.2)) stretch <- diag(c(2,3)) Y <- affine(X, mat=stretch) shear <- matrix(c(1,0,0.6,1),ncol=2, nrow=2) Z <- affine(X, mat=shear, vec=c(0, 1)) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat.linnet/man/linearKcross.Rd0000644000176200001440000000564614141460471017123 0ustar liggesusers\name{linearKcross} \alias{linearKcross} \title{ Multitype K Function (Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype \eqn{K} function which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}. } \usage{ linearKcross(X, i, j, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. 
First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{Ignored.} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{Kcross}} for a point pattern on a linear network (object of class \code{"lpp"}). The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The arguments \code{i} and \code{j} are interpreted as levels of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearKdot}}, \code{\link{linearK}}. } \examples{ data(chicago) K <- linearKcross(chicago, "assault", "robbery") } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/linim.Rd0000644000176200001440000000601014141460471015556 0ustar liggesusers\name{linim} \alias{linim} \title{ Create Pixel Image on Linear Network } \description{ Creates an object of class \code{"linim"} that represents a pixel image on a linear network. } \usage{ linim(L, Z, \dots, restrict=TRUE, df=NULL) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). } \item{Z}{ Pixel image (object of class \code{"im"}). } \item{\dots}{Ignored.} \item{restrict}{ Advanced use only. Logical value indicating whether to ensure that all pixels in \code{Z} which do not lie on the network \code{L} have pixel value \code{NA}. This condition must be satisfied, but if you set \code{restrict=FALSE} it will not be checked, and the code will run faster. } \item{df}{ Advanced use only. Data frame giving full details of the mapping between the pixels of \code{Z} and the lines of \code{L}. See Details. } } \details{ This command creates an object of class \code{"linim"} that represents a pixel image defined on a linear network. Typically such objects are used to represent the result of smoothing or model-fitting on the network. Most users will not need to call \code{linim} directly. The argument \code{L} is a linear network (object of class \code{"linnet"}). It gives the exact spatial locations of the line segments of the network, and their connectivity. The argument \code{Z} is a pixel image object of class \code{"im"} that gives a pixellated approximation of the function values. For increased efficiency, advanced users may specify the optional argument \code{df}. This is a data frame giving the precomputed mapping between the pixels of \code{Z} and the line segments of \code{L}. 
It should have columns named \code{xc, yc} containing the coordinates of the pixel centres, \code{x,y} containing the projections of these pixel centres onto the linear network, \code{mapXY} identifying the line segment on which each projected point lies, and \code{tp} giving the parametric position of \code{(x,y)} along the segment. } \value{ Object of class \code{"linim"} that also inherits the class \code{"im"}. There is a special method for plotting this class. } \author{ \adrian } \seealso{ \code{\link{plot.linim}}, \code{\link{linnet}}, \code{\link{eval.linim}}, \code{\link{Math.linim}}, \code{\link{im}}. } \examples{ Z <- as.im(function(x,y) {x-y}, Frame(simplenet)) X <- linim(simplenet, Z) X } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. McSwiggan, G., Nair, M.G. and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation. } \keyword{spatial} spatstat.linnet/man/branchlabelfun.Rd0000644000176200001440000000302014141460471017412 0ustar liggesusers\name{branchlabelfun} \alias{branchlabelfun} \title{ Tree Branch Membership Labelling Function } \description{ Creates a function which returns the tree branch membership label for any location on a linear network. } \usage{ branchlabelfun(L, root = 1) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). The network must have no loops. } \item{root}{ Root of the tree. An integer index identifying which point in \code{vertices(L)} is the root of the tree. } } \details{ The linear network \code{L} must be an acyclic graph (i.e. must not contain any loops) so that it can be interpreted as a tree. The result of \code{f <- branchlabelfun(L, root)} is a function \code{f} which gives, for each location on the linear network \code{L}, the tree branch label at that location. Tree branch labels are explained in \code{\link{treebranchlabels}}. The result \code{f} also belongs to the class \code{"linfun"}. It can be called using several different kinds of data, as explained in the help for \code{\link{linfun}}. The values of the function are character strings. } \value{ A function (of class \code{"linfun"}). } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{treebranchlabels}}, \code{\link{linfun}} } \examples{ # make a simple tree m <- simplenet$m m[8,10] <- m[10,8] <- FALSE L <- linnet(vertices(simplenet), m) # make function f <- branchlabelfun(L, 1) plot(f) X <- runiflpp(5, L) f(X) } \keyword{spatial} \keyword{math} spatstat.linnet/man/methods.lppm.Rd0000644000176200001440000000525014141460471017065 0ustar liggesusers\name{methods.lppm} \alias{methods.lppm} %DoNotExport \alias{coef.lppm} \alias{emend.lppm} \alias{extractAIC.lppm} \alias{formula.lppm} \alias{logLik.lppm} \alias{deviance.lppm} \alias{nobs.lppm} \alias{print.lppm} \alias{summary.lppm} \alias{terms.lppm} \alias{update.lppm} \alias{valid.lppm} \alias{vcov.lppm} \alias{as.linnet.lppm} \alias{response.lppm} \title{ Methods for Fitted Point Process Models on a Linear Network } \description{ These are methods for the class \code{"lppm"} of fitted point process models on a linear network. } \usage{ \method{coef}{lppm}(object, ...) 
\method{emend}{lppm}(object, \dots) \method{extractAIC}{lppm}(fit, ...) \method{formula}{lppm}(x, ...) \method{logLik}{lppm}(object, ...) \method{deviance}{lppm}(object, ...) \method{nobs}{lppm}(object, ...) \method{print}{lppm}(x, ...) \method{summary}{lppm}(object, ...) \method{terms}{lppm}(x, ...) \method{update}{lppm}(object, ...) \method{valid}{lppm}(object, ...) \method{vcov}{lppm}(object, ...) \method{as.linnet}{lppm}(X, ...) \method{response}{lppm}(object) } \arguments{ \item{object,fit,x,X}{ An object of class \code{"lppm"} representing a fitted point process model on a linear network. } \item{\dots}{ Arguments passed to other methods, usually the method for the class \code{"ppm"}. } } \details{ These are methods for the \R generic commands \code{\link[stats]{coef}}, \code{\link[stats]{extractAIC}}, \code{\link[stats]{formula}}, \code{\link[stats]{logLik}}, \code{\link[stats]{deviance}}, \code{\link[stats]{nobs}}, \code{\link[base]{print}}, \code{\link[base]{summary}}, \code{\link[stats]{terms}}, \code{\link[stats]{update}} and \code{\link[stats]{vcov}}, and the \pkg{spatstat} generic commands \code{\link{as.linnet}}, \code{\link[spatstat.core]{emend}}, \code{\link[spatstat.core]{response}} and \code{\link[spatstat.core]{valid}}, for the class \code{"lppm"}. } \value{ For \code{as.linnet.lppm} a linear network (object of class \code{"linnet"}). For \code{emend.lppm} another fitted model of the same class \code{"lppm"}. For \code{response.lppm} a spatial point pattern on a linear network (object of class \code{"lpp"}). For \code{valid.lppm} a logical value. For the other methods, see the help for the default methods. } \author{ \spatstatAuthors. } \seealso{ \code{\link{lppm}}, \code{\link{plot.lppm}}. } \examples{ X <- runiflpp(15, simplenet) fit <- lppm(X ~ x) print(fit) coef(fit) formula(fit) terms(fit) logLik(fit) deviance(fit) nobs(fit) extractAIC(fit) update(fit, ~1) valid(fit) vcov(fit) response(fit) } \keyword{spatial} \keyword{models} spatstat.linnet/man/spatstat.linnet-deprecated.Rd0000644000176200001440000000137214155072546021714 0ustar liggesusers\name{spatstat.linnet-deprecated} \alias{circumradius.linnet} \alias{rjitterlpp} \title{Deprecated spatstat.linnet functions} \description{ Deprecated spatstat.linnet functions. } \usage{ \method{circumradius}{linnet}(x, \dots) rjitterlpp(X, \dots) } \details{ These functions are deprecated, and will eventually be deleted from the \pkg{spatstat.linnet} package. \code{rjitterlpp} is replaced by \code{rjitter.lpp}, a method for the generic \code{rjitter}. \code{circumradius.linnet} is replaced by the more appropriately named \code{boundingradius.linnet}. } \value{ \code{circumradius.linnet} returns a numeric value. \code{rjitterlpp} returns a point pattern on a linear network (object of class \code{"lpp"}). } \keyword{internal} spatstat.linnet/man/methods.linfun.Rd0000644000176200001440000000473414141460471017416 0ustar liggesusers\name{methods.linfun} \Rdversion{1.1} \alias{methods.linfun} %DoNotExport \alias{print.linfun} \alias{summary.linfun} \alias{plot.linfun} \alias{as.data.frame.linfun} \alias{as.owin.linfun} \alias{as.function.linfun} \title{ Methods for Functions on Linear Network } \description{ Methods for the class \code{"linfun"} of functions on a linear network. 
} \usage{ \method{print}{linfun}(x, \dots) \method{summary}{linfun}(object, \dots) \method{plot}{linfun}(x, \dots, L=NULL, main) \method{as.data.frame}{linfun}(x, \dots) \method{as.owin}{linfun}(W, \dots) \method{as.function}{linfun}(x, \dots) } \arguments{ \item{x,object,W}{ A function on a linear network (object of class \code{"linfun"}). } \item{L}{A linear network} \item{\dots}{ Extra arguments passed to \code{\link{as.linim}}, \code{\link{plot.linim}}, \code{\link{plot.im}} or \code{\link{print.default}}, or arguments passed to \code{x} if it is a function. } \item{main}{Main title for plot.} } \details{ These are methods for the generic functions \code{\link{plot}}, \code{\link{print}}, \code{\link{summary}}, \code{\link{as.data.frame}} and \code{\link{as.function}}, and for the \pkg{spatstat} generic function \code{\link{as.owin}}. An object of class \code{"linfun"} represents a mathematical function that could be evaluated at any location on a linear network. It is essentially an \R \code{function} with some extra attributes. The method \code{as.owin.linfun} extracts the two-dimensional spatial window containing the linear network. The method \code{plot.linfun} first converts the function to a pixel image using \code{\link{as.linim.linfun}}, then plots the image using \code{\link{plot.linim}}. Note that a \code{linfun} function may have additional arguments, other than those which specify the location on the network (see \code{\link{linfun}}). These additional arguments may be passed to \code{plot.linfun}. } \value{ For \code{print.linfun} and \code{summary.linfun} the result is \code{NULL}. For \code{plot.linfun} the result is the same as for \code{\link{plot.linim}}. For the conversion methods, the result is an object of the required type: \code{as.owin.linfun} returns an object of class \code{"owin"}, and so on. } \examples{ X <- runiflpp(3, simplenet) f <- nnfun(X) f plot(f) as.function(f) as.owin(f) head(as.data.frame(f)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat.linnet/man/connected.lpp.Rd0000644000176200001440000000530414141460471017207 0ustar liggesusers\name{connected.lpp} \alias{connected.lpp} \title{ Connected Components of a Point Pattern on a Linear Network } \description{ Finds the topologically-connected components of a point pattern on a linear network, when all pairs of points closer than a threshold distance are joined. } \usage{ \method{connected}{lpp}(X, R=Inf, \dots, dismantle=TRUE) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{R}{ Threshold distance. Pairs of points will be joined together if they are closer than \code{R} units apart, measured by the shortest path in the network. The default \code{R=Inf} implies that points will be joined together if they are mutually connected by any path in the network. } \item{dismantle}{ Logical. If \code{TRUE} (the default), the network itself will be divided into its path-connected components using \code{\link{connected.linnet}}. } \item{\dots}{ Ignored. } } \details{ The function \code{connected} is generic. This is the method for point patterns on a linear network (objects of class \code{"lpp"}). It divides the point pattern \code{X} into one or more groups of points. If \code{R=Inf} (the default), then \code{X} is divided into groups such that any pair of points in the same group can be joined by a path in the network. 
If \code{R} is a finite number, then two points of \code{X} are declared to be \emph{R-close} if they lie closer than \code{R} units apart, measured by the length of the shortest path in the network. Two points are \emph{R-connected} if they can be reached by a series of steps between R-close pairs of points of \code{X}. Then \code{X} is divided into groups such that any pair of points in the same group is R-connected. If \code{dismantle=TRUE} (the default) the algorithm first checks whether the network is connected (i.e. whether any pair of vertices can be joined by a path in the network), and if not, the network is decomposed into its connected components. } \value{ A point pattern (of class \code{"lpp"}) with marks indicating the grouping, or a list of such point patterns. } \author{ \adrian. } \seealso{ \code{\link{thinNetwork}} } \examples{ ## behaviour like connected.ppp U <- runiflpp(20, simplenet) plot(connected(U, 0.15, dismantle=FALSE)) ## behaviour like connected.owin ## remove some edges from a network to make it disconnected plot(simplenet, col="grey", main="", lty=2) A <- thinNetwork(simplenet, retainedges=-c(3,5)) plot(A, add=TRUE, lwd=2) X <- runiflpp(10, A) ## find the connected components cX <- connected(X) plot(cX[[1]], add=TRUE, col="blue", lwd=2) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/bw.lppl.Rd0000644000176200001440000001005114141460471016024 0ustar liggesusers\name{bw.lppl} \alias{bw.lppl} \title{ Likelihood Cross Validation Bandwidth Selection for Kernel Density on a Linear Network } \description{ Uses likelihood cross-validation to select a smoothing bandwidth for the kernel estimation of point process intensity on a linear network. } \usage{ bw.lppl(X, \dots, srange=NULL, ns=16, sigma=NULL, weights=NULL, distance=c("euclidean", "path"), shortcut=TRUE, warn=TRUE) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{srange}{ Optional numeric vector of length 2 giving the range of values of bandwidth to be searched. } \item{ns}{ Optional integer giving the number of values of bandwidth to search. } \item{sigma}{ Optional. Vector of values of the bandwidth to be searched. Overrides the values of \code{ns} and \code{srange}. } \item{weights}{ Optional. Numeric vector of weights for the points of \code{X}. Argument passed to \code{\link{density.lpp}}. } \item{distance}{ Argument passed to \code{\link{density.lpp}} controlling the type of kernel estimator. } \item{\dots}{ Additional arguments passed to \code{\link{density.lpp}}. } \item{shortcut}{ Logical value indicating whether to speed up the calculation by omitting the integral term in the cross-validation criterion. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the maximum of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.lpp}}. The argument \code{X} should be a point pattern on a linear network (class \code{"lpp"}). 
The bandwidth \eqn{\sigma}{\sigma} is chosen to maximise the point process likelihood cross-validation criterion \deqn{ \mbox{LCV}(\sigma) = \sum_i \log\hat\lambda_{-i}(x_i) - \int_L \hat\lambda(u) \, {\rm d}u }{ LCV(\sigma) = sum[i] log(\lambda[-i](x[i])) - integral[L] \lambda(u) du } where the sum is taken over all the data points \eqn{x_i}{x[i]}, where \eqn{\hat\lambda_{-i}(x_i)}{\lambda[-i](x_i)} is the leave-one-out kernel-smoothing estimate of the intensity at \eqn{x_i}{x[i]} with smoothing bandwidth \eqn{\sigma}{\sigma}, and \eqn{\hat\lambda(u)}{\lambda(u)} is the kernel-smoothing estimate of the intensity at a spatial location \eqn{u} with smoothing bandwidth \eqn{\sigma}{\sigma}. See Loader(1999, Section 5.3). The value of \eqn{\mbox{LCV}(\sigma)}{LCV(\sigma)} is computed directly, using \code{\link{density.lpp}}, for \code{ns} different values of \eqn{\sigma}{\sigma} between \code{srange[1]} and \code{srange[2]}. The result is a numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted to show the (rescaled) mean-square error as a function of \code{sigma}. If \code{shortcut=TRUE}, the computation is accelerated by omitting the integral term in the equation above. This is valid because the integral is approximately constant. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{density.lpp}}, \code{\link{bw.scott}}. For point patterns in two-dimensional space, use \code{\link{bw.ppl}}. } \examples{ if(interactive()) { b <- bw.lppl(spiders) plot(b, main="Likelihood cross validation for spiders") plot(density(spiders, b, distance="e")) } else { b1 <- bw.lppl(spiders, ns=2) b2 <- bw.lppl(spiders, ns=2, shortcut=TRUE) } } \references{ Loader, C. (1999) \emph{Local Regression and Likelihood}. Springer, New York. McSwiggan, G., Baddeley, A. and Nair, G. (2019) Estimation of relative risk for events on a linear network. \emph{Statistics and Computing} \bold{30} (2) 469--484. } \author{ Greg McSwiggan, Suman Rakshit and \adrian. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.linnet/man/densityVoronoi.lpp.Rd0000644000176200001440000000662214141460471020304 0ustar liggesusers\name{densityVoronoi.lpp} \alias{densityVoronoi.lpp} \title{Intensity Estimate of Point Pattern on Linear Network Using Voronoi-Dirichlet Tessellation} \description{ Computes an adaptive estimate of the intensity function of a point pattern on a linear network, using the Dirichlet-Voronoi tessellation on the network. } \usage{ \method{densityVoronoi}{lpp}(X, f = 1, \dots, nrep = 1, verbose = TRUE) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } \item{f}{ Fraction (between 0 and 1 inclusive) of the data points that will be used to build a tessellation for the intensity estimate. } \item{\dots}{ Arguments passed to \code{\link{linim}} determining the pixel resolution of the result. } \item{nrep}{ Number of independent repetitions of the randomised procedure. } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ This function is an alternative to \code{\link{density.lpp}}. It computes an estimate of the intensity function of a point pattern dataset on a linear network. The result is a pixel image on the network, giving the estimated intensity. 
This function is a method for the generic \code{\link{densityVoronoi}} for the class \code{"lpp"} of point patterns on a linear network. If \code{f=1} (the default), the Voronoi estimate (Barr and Schoenberg, 2010) is computed: the point pattern \code{X} is used to construct a Voronoi/Dirichlet tessellation on the network (see \code{\link{lineardirichlet}}); the lengths of the Dirichlet tiles are computed; the estimated intensity in each tile is the reciprocal of the tile length. The result is a pixel image of intensity estimates which are constant on each tile of the tessellation. If \code{f=0}, the intensity estimate at every location is equal to the average intensity (number of points divided by network length). The result is a pixel image of intensity estimates which are constant. If \code{f} is strictly between 0 and 1, the smoothed Voronoi estimate (Moradi et al., 2019) is computed. The dataset \code{X} is randomly thinned by deleting or retaining each point independently, with probability \code{f} of retaining a point. The thinned pattern is used to construct a Dirichlet tessellation and form the Voronoi estimate, which is then adjusted by a factor \code{1/f}. This procedure is repeated \code{nrep} times and the results are averaged to obtain the smoothed Voronoi estimate. The value \code{f} can be chosen automatically by bandwidth selection using \code{\link{bw.voronoi}}. } \value{ Pixel image on a linear network (object of class \code{"linim"}). } \references{ Barr, C.D. and Schoenberg, F.P. (2010) On the Voronoi estimator for the intensity of an inhomogeneous planar Poisson process. \emph{Biometrika} \bold{97}, 977--984. Moradi, M., Cronie, O., Rubak, E., Lachieze-Rey, R., Mateu, J. and Baddeley, A. (2019) Resample-smoothing of Voronoi intensity estimators. \emph{Statistics and Computing}, in press. } \author{ \spatstatAuthors and Mehdi Moradi. } \seealso{ \code{\link{densityVoronoi}} is the generic, with a method for class \code{"ppp"}. \code{\link{lineardirichlet}} computes the Dirichlet-Voronoi tessellation on a network. \code{\link{bw.voronoi}} performs bandwidth selection of the fraction \code{f}. See also \code{\link{density.lpp}}. } \examples{ nr <- if(interactive()) 100 else 3 plot(densityVoronoi(spiders, 0.1, nrep=nr)) } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.linnet/man/methods.lpp.Rd0000644000176200001440000000521714141460471016713 0ustar liggesusers\name{methods.lpp} \alias{methods.lpp} %DoNotExport \Rdversion{1.1} \alias{as.ppp.lpp} \alias{as.psp.lpp} \alias{marks<-.lpp} \alias{nsegments.lpp} \alias{print.lpp} \alias{print.summary.lpp} \alias{summary.lpp} \alias{unitname.lpp} \alias{unitname<-.lpp} \alias{unmark.lpp} \title{ Methods for Point Patterns on a Linear Network } \description{ These are methods specifically for the class \code{"lpp"} of point patterns on linear networks. } \usage{ \method{as.ppp}{lpp}(X, ..., fatal=TRUE) \method{as.psp}{lpp}(x, ..., fatal=TRUE) \method{marks}{lpp}(x, ...) <- value \method{nsegments}{lpp}(x) \method{print}{lpp}(x, ...) \method{print}{summary.lpp}(x, ...) \method{summary}{lpp}(object, ...) \method{unitname}{lpp}(x) \method{unitname}{lpp}(x) <- value \method{unmark}{lpp}(X) } \arguments{ \item{x,X,object}{ An object of class \code{"lpp"} representing a point pattern on a linear network. } \item{\dots}{ Arguments passed to other methods. } \item{value}{ Replacement value for the \code{marks} or \code{unitname} of \code{x}. See Details. } \item{fatal}{ Logical value indicating whether data in the wrong format should lead to an error (\code{fatal=TRUE}) or a warning (\code{fatal=FALSE}).
} } \details{ These are methods for the generic functions \code{\link[spatstat.geom]{as.ppp}}, \code{\link[spatstat.geom]{as.psp}}, \code{\link[spatstat.geom]{marks<-}}, \code{\link[spatstat.geom]{nsegments}}, \code{\link[base]{print}}, \code{\link[base]{summary}}, \code{\link[spatstat.geom]{unitname}}, \code{\link[spatstat.geom]{unitname<-}} and \code{\link[spatstat.geom]{unmark}} for objects of the class \code{"lpp"}. For \code{"marks<-.lpp"} the replacement \code{value} should be either \code{NULL}, or a vector of length equal to the number of points in \code{x}, or a data frame with one row for each point in \code{x}. For \code{"unitname<-.lpp"} the replacement \code{value} should be a valid name for the unit of length, as described in \code{\link[spatstat.geom]{unitname}}. } \section{Other methods}{ An object of class \code{"lpp"} also inherits the class \code{"ppx"} for which many other methods are available. See \code{\link[spatstat.geom:methods.ppx]{methods.ppx}}. } \value{ See the documentation on the corresponding generic function. } \author{ \adrian } \seealso{ \code{\link{lpp}}, \code{\link{intensity.lpp}}, \code{\link[spatstat.geom:methods.ppx]{methods.ppx}} } \examples{ X <- runiflpp(10, simplenet) unitname(X) <- c("furlong", "furlongs") X summary(X) summary(chicago) nsegments(X) Y <- as.ppp(X) } \keyword{spatial} \keyword{methods} spatstat.linnet/man/pairs.linim.Rd0000644000176200001440000000402114141460471016673 0ustar liggesusers\name{pairs.linim} \alias{pairs.linim} \title{ Scatterplot Matrix for Pixel Images on a Linear Network } \description{ Produces a scatterplot matrix of the pixel values in two or more pixel images on a linear network. } \usage{ \method{pairs}{linim}(..., plot=TRUE, eps=NULL) } \arguments{ \item{\dots}{ Any number of arguments, each of which is either a pixel image on a linear network (object of class \code{"linim"}), a pixel image (object of class \code{"im"}), or a named argument to be passed to \code{\link{pairs.default}}. } \item{plot}{ Logical. If \code{TRUE}, the scatterplot matrix is plotted. } \item{eps}{ Optional. Spacing between sample points on the network. A positive number. } } \details{ This is a method for the generic function \code{\link{pairs}} for the class of pixel images on a linear network. It produces a square array of plot panels, in which each panel shows a scatterplot of the pixel values of one image against the corresponding pixel values of another image. At least two of the arguments \code{\dots} should be a pixel image on a linear network (object of class \code{"linim"}). They should be defined on the \bold{same} linear network, but may have different pixel resolutions. First the pixel values of each image are extracted at a set of sample points equally-spaced across the network. Then \code{\link{pairs.default}} is called to plot the scatterplot matrix. Any arguments in \code{\dots} which are not pixel images will be passed to \code{\link{pairs.default}} to control the plot. } \value{ Invisible. A \code{data.frame} containing the corresponding pixel values for each image. The return value also belongs to the class \code{plotpairsim} which has a plot method, so that it can be re-plotted. 
} \seealso{ \code{\link{pairs.default}}, \code{\link{pairs.im}} } \examples{ fit <- lppm(chicago ~ marks * (x+y)) lam <- predict(fit) do.call(pairs, lam) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{hplot} spatstat.linnet/man/Extract.linim.Rd0000644000176200001440000000402114141460471017167 0ustar liggesusers\name{Extract.linim} \alias{[.linim} \title{Extract Subset of Pixel Image on Linear Network} \description{ Extract a subset of a pixel image on a linear network. } \usage{ \method{[}{linim}(x, i, \dots, drop=TRUE) } \arguments{ \item{x}{ A pixel image on a linear network (object of class \code{"linim"}). } \item{i}{ Spatial window defining the subregion. Either a spatial window (an object of class \code{"owin"}), or a logical-valued pixel image, or any type of index that applies to a matrix, or a point pattern (an object of class \code{"lpp"} or \code{"ppp"}), or something that can be converted to a point pattern by \code{\link{as.lpp}} (using the network on which \code{x} is defined). } \item{\dots}{Additional arguments passed to \code{[.im}.} \item{drop}{Logical value indicating whether \code{NA} values should be omitted from the result.} } \value{ Another pixel image on a linear network (object of class \code{"linim"}) or a vector of pixel values. } \details{ This function is a method for the subset operator \code{"["} for pixel images on linear networks (objects of class \code{"linim"}). The pixel image \code{x} will be restricted to the domain specified by \code{i}. Pixels outside the domain of \code{x} are assigned the value \code{NA}; if \code{drop=TRUE} (the default) such \code{NA} values are deleted from the result; if \code{drop=FALSE}, then \code{NA} values are retained. If \code{i} is a window (or a logical-valued pixel image) then \code{x[i]} is another pixel image of class \code{"linim"}, representing the restriction of \code{x} to the spatial domain specified by \code{i}. If \code{i} is a point pattern, then \code{x[i]} is the vector of pixel values of \code{x} at the locations specified by \code{i}. } \examples{ M <- as.mask.psp(as.psp(simplenet)) Z <- as.im(function(x,y){x}, W=M) Y <- linim(simplenet, Z) X <- runiflpp(4, simplenet) Y[X] Y[square(c(0.3, 0.6))] } \author{ \adrian } \keyword{spatial} \keyword{manip} spatstat.linnet/man/relrisk.lpp.Rd0000644000176200001440000002145114155071513016721 0ustar liggesusers\name{relrisk.lpp} \alias{relrisk.lpp} \title{ Nonparametric Estimate of Spatially-Varying Relative Risk on a Network } \description{ Given a multitype point pattern on a linear network, this function estimates the spatially-varying probability of each type of point, or the ratios of such probabilities, using kernel smoothing. } \usage{ \method{relrisk}{lpp}(X, sigma, ..., at = c("pixels", "points"), relative=FALSE, adjust=1, casecontrol=TRUE, control=1, case, finespacing=FALSE) } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"lpp"} which has factor valued marks). } \item{sigma}{ The numeric value of the smoothing bandwidth (the standard deviation of Gaussian smoothing kernel) passed to \code{\link{density.lpp}}. Alternatively \code{sigma} may be a function which can be used to select the bandwidth. See Details. } \item{\dots}{ Arguments passed to \code{\link{density.lpp}} to control the pixel resolution. } \item{at}{ Character string specifying whether to compute the probability values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{relative}{ Logical. 
If \code{FALSE} (the default) the algorithm computes the probabilities of each type of point. If \code{TRUE}, it computes the \emph{relative risk}, the ratio of probabilities of each type relative to the probability of a control. } \item{adjust}{ Optional. Adjustment factor for the bandwidth \code{sigma}. } \item{casecontrol}{ Logical. Whether to treat a bivariate point pattern as consisting of cases and controls, and return only the probability or relative risk of a case. Ignored if there are more than 2 types of points. See Details. } \item{control}{ Integer, or character string, identifying which mark value corresponds to a control. } \item{case}{ Integer, or character string, identifying which mark value corresponds to a case (rather than a control) in a bivariate point pattern. This is an alternative to the argument \code{control} in a bivariate point pattern. Ignored if there are more than 2 types of points. } \item{finespacing}{ Logical value specifying whether to use a finer spatial resolution (with longer computation time but higher accuracy). } } \details{ The command \code{\link{relrisk}} is generic and can be used to estimate relative risk in different ways. This function \code{relrisk.lpp} is the method for point patterns on a linear network (objects of class \code{"lpp"}). It computes \emph{nonparametric} estimates of relative risk by kernel smoothing. If \code{X} is a bivariate point pattern (a multitype point pattern consisting of two types of points) then by default, the points of the first type (the first level of \code{marks(X)}) are treated as controls or non-events, and points of the second type are treated as cases or events. Then by default this command computes the spatially-varying \emph{probability} of a case, i.e. the probability \eqn{p(u)} that a point at location \eqn{u} on the network will be a case. If \code{relative=TRUE}, it computes the spatially-varying \emph{relative risk} of a case relative to a control, \eqn{r(u) = p(u)/(1- p(u))}. If \code{X} is a multitype point pattern with \eqn{m > 2} types, or if \code{X} is a bivariate point pattern and \code{casecontrol=FALSE}, then by default this command computes, for each type \eqn{j}, a nonparametric estimate of the spatially-varying \emph{probability} of an event of type \eqn{j}. This is the probability \eqn{p_j(u)}{p[j](u)} that a point at location \eqn{u} on the network will belong to type \eqn{j}. If \code{relative=TRUE}, the command computes the \emph{relative risk} of an event of type \eqn{j} relative to a control, \eqn{r_j(u) = p_j(u)/p_k(u)}{r[j](u) = p[j](u)/p[k](u)}, where events of type \eqn{k} are treated as controls. The argument \code{control} determines which type \eqn{k} is treated as a control. If \code{at = "pixels"} the calculation is performed for every location \eqn{u} on a fine pixel grid over the network, and the result is a pixel image on the network representing the function \eqn{p(u)}, or a list of pixel images representing the functions \eqn{p_j(u)}{p[j](u)} or \eqn{r_j(u)}{r[j](u)} for \eqn{j = 1,\ldots,m}{j = 1,...,m}. An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{NA}. If \code{at = "points"} the calculation is performed only at the data points \eqn{x_i}{x[i]}. 
By default the result is a vector of values \eqn{p(x_i)}{p(x[i])} giving the estimated probability of a case at each data point, or a matrix of values \eqn{p_j(x_i)}{p[j](x[i])} giving the estimated probability of each possible type \eqn{j} at each data point. If \code{relative=TRUE} then the relative risks \eqn{r(x_i)}{r(x[i])} or \eqn{r_j(x_i)}{r[j](x[i])} are returned. An infinite value of relative risk (arising because the probability of a control is zero) will be returned as \code{Inf}. Estimation is performed by a Nadaraya-Watson type kernel smoother (McSwiggan et al., 2019). The smoothing bandwidth \code{sigma} should be a single numeric value, giving the standard deviation of the isotropic Gaussian kernel. If \code{adjust} is given, the smoothing bandwidth used in the computation of relative risk will be \code{adjust * sigma}. Alternatively, \code{sigma} may be a function that can be applied to the point pattern \code{X} to select a bandwidth; the function must return a single numerical value; examples include the functions \code{\link{bw.relrisklpp}} and \code{\link{bw.scott.iso}}. Accuracy depends on the spatial resolution of the density computations. If the arguments \code{dx} and \code{dt} are present, they are passed to \code{\link{density.lpp}} to determine the spatial resolution. Otherwise, the spatial resolution is determined by a default rule that depends on \code{finespacing} and \code{sigma}. If \code{finespacing=FALSE} (the default), the spatial resolution is equal to the default resolution for pixel images. If \code{finespacing=TRUE}, the spatial resolution is much finer and is determined by a rule which guarantees higher accuracy, but takes a longer time. } \value{ If \code{X} consists of only two types of points, and if \code{casecontrol=TRUE}, the result is a pixel image on the network (if \code{at="pixels"}) or a vector (if \code{at="points"}). The pixel values or vector values are the probabilities of a case if \code{relative=FALSE}, or the relative risk of a case (probability of a case divided by the probability of a control) if \code{relative=TRUE}. If \code{X} consists of more than two types of points, or if \code{casecontrol=FALSE}, the result is: \itemize{ \item (if \code{at="pixels"}) a list of pixel images on the network, with one image for each possible type of point. The result also belongs to the class \code{"solist"} so that it can be printed and plotted. \item (if \code{at="points"}) a matrix of probabilities, with rows corresponding to data points \eqn{x_i}{x[i]}, and columns corresponding to types \eqn{j}. } The pixel values or matrix entries are the probabilities of each type of point if \code{relative=FALSE}, or the relative risk of each type (probability of each type divided by the probability of a control) if \code{relative=TRUE}. If \code{relative=FALSE}, the resulting values always lie between 0 and 1. If \code{relative=TRUE}, the results are either non-negative numbers, or the values \code{Inf} or \code{NA}.
} \seealso{ \code{\link{relrisk}} } \examples{ ## case-control data: 2 types of points set.seed(2020) X <- superimpose(A=runiflpp(20, simplenet), B=runifpointOnLines(20, as.psp(simplenet)[1])) plot(X) plot(relrisk(X, 0.2)) plot(relrisk(X, 0.2, case="B")) head(relrisk(X, 0.2, at="points")) ## cross-validated bandwidth selection plot(relrisk(X, bw.relrisklpp, hmax=0.25)) ## more than 2 types if(interactive()) { U <- chicago sig <- 170 } else { U <- do.call(superimpose, split(chicago)[c("theft", "cartheft", "burglary")]) sig <- 50 } plot(relrisk(U, sig)) head(relrisk(U, sig, at="points")) plot(relrisk(U, sig, relative=TRUE, control="theft")) } \references{ McSwiggan, G., Baddeley, A. and Nair, G. (2019) Estimation of relative risk for events on a linear network. \emph{Statistics and Computing} \bold{30} (2) 469--484. } \author{ Greg McSwiggan and \adrian. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.linnet/man/plot.linim.Rd0000644000176200001440000001443714141460471016547 0ustar liggesusers\name{plot.linim} \alias{plot.linim} \title{ Plot Pixel Image on Linear Network } \description{ Given a pixel image on a linear network, the pixel values are displayed either as colours or as line widths. } \usage{ \method{plot}{linim}(x, ..., style = c("colour", "width"), scale, adjust = 1, fatten = 0, negative.args = list(col=2), legend=TRUE, leg.side=c("right", "left", "bottom", "top"), leg.sep=0.1, leg.wid=0.1, leg.args=list(), leg.scale=1, zlim, box=FALSE, do.plot=TRUE) } \arguments{ \item{x}{ The pixel image to be plotted. An object of class \code{"linim"}. } \item{\dots}{ Extra graphical parameters, passed to \code{\link{plot.im}} if \code{style="colour"}, or to \code{\link[graphics]{polygon}} if \code{style="width"}. } \item{style}{ Character string (partially matched) specifying the type of plot. See Details. } \item{scale}{ Physical scale factor for representing the pixel values as line widths. } \item{adjust}{ Adjustment factor for the conversion of pixel value to line width, when \code{style="width"}. } \item{fatten}{ Distance by which the line segments should be thickened, when \code{style="colour"}. } \item{negative.args}{ A list of arguments to be passed to \code{\link[graphics]{polygon}} specifying how to plot negative values of \code{x} when \code{style="width"}. } \item{legend}{ Logical value indicating whether to plot a legend (colour ribbon or scale bar). } \item{leg.side}{ Character string (partially matched) indicating where to display the legend relative to the main image. } \item{leg.sep}{ Factor controlling the space between the legend and the image. } \item{leg.wid}{ Factor controlling the width of the legend. } \item{leg.scale}{ Rescaling factor for annotations on the legend. The values on the numerical scale printed beside the legend will be multiplied by this rescaling factor. } \item{leg.args}{ List of additional arguments passed to \code{\link[graphics]{image.default}}, \code{\link[graphics]{axis}} or \code{\link[graphics]{text.default}} to control the display of the legend. These may override the \code{\dots} arguments. } \item{zlim}{ The range of numerical values that should be mapped. A numeric vector of length 2. Defaults to the range of values of \code{x}. } \item{box}{ Logical value indicating whether to draw a bounding box. } \item{do.plot}{ Logical value indicating whether to actually perform the plot. } } \details{ This is the \code{plot} method for objects of class \code{"linim"}. Such an object represents a pixel image defined on a linear network. 
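As a minimal sketch (the two plotting styles are explained below):
\preformatted{
  Z <- as.linim(linfun(function(x,y,seg,tp) { x }, simplenet))
  plot(Z)                   # style="colour", the default
  plot(Z, style="width")    # line width proportional to pixel value
}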
If \code{style="colour"} (the default) then the pixel values of \code{x} are plotted as colours, using \code{\link{plot.im}}. The mapping from pixel values to colours is determined by any additional arguments \code{\dots} which are passed to \code{\link{plot.im}}. If \code{style="width"} then the pixel values of \code{x} are used to determine the widths of thick lines centred on the line segments of the linear network. The mapping from pixel values to line widths is determined by the arguments \code{scale} and \code{adjust}. The plotting of colours and borders of the lines is controlled by the additional arguments \code{\dots} which are passed to \code{\link[graphics]{polygon}}. A different set of colours and borders can be assigned to negative pixel values by passing a list of arguments in \code{negative.args} as shown in the Examples. A legend is displayed alongside the plot if \code{legend=TRUE} (the default). The legend displays the relationship between pixel values and colours (if \code{style="colour"}) or between pixel values and line widths (if \code{style="width"}). The plotting of the legend itself is controlled by the arguments \code{leg.side}, \code{leg.sep}, \code{leg.wid}, \code{leg.scale} and the list of arguments \code{leg.args}, which are described above. If \code{style="colour"}, these arguments are mapped to the arguments \code{ribside}, \code{ribsep}, \code{ribwid}, \code{ribscale} and \code{ribargs} respectively, which are passed to \code{\link{plot.im}}. } \section{Thin lines}{ When \code{style="colour"} it often appears that the lines are drawn too thin. This occurs because \code{x} is a pixel image, in which the only pixels that have a defined value are those which lie directly over the network. To make the lines appear thicker in the plot, use the argument \code{fatten}. The domain of the image will be expanded by a distance equal to \code{fatten/2} in every direction using \code{\link{dilation.owin}}; the pixel values will be extrapolated to this expanded domain using \code{\link{nearestValue}}. This may improve the visual appearance of the plot. } \value{ If \code{style="colour"}, the result is an object of class \code{"colourmap"} specifying the colour map used. If \code{style="width"}, the result is a numeric value \code{v} giving the physical scale: one unit of pixel value is represented as \code{v} physical units on the plot. The result also has an attribute \code{"bbox"} giving a bounding box for the plot. The bounding box includes the ribbon or scale bar, if present, but not the main title. } \author{ \adrian } \seealso{ \code{\link{linim}}, \code{\link{plot.im}}, \code{\link[graphics]{polygon}} } \references{ Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. 
} \examples{ X <- linfun(function(x,y,seg,tp){y^2+x}, simplenet) X <- as.linim(X) plot(X, main="Colour represents function value") plot(X, fatten=0.02, main="fattened") plot(X, style="width", main="Width proportional to function value") # signed values f <- linfun(function(x,y,seg,tp){y-x}, simplenet) plot(f, style="w", main="Negative values in red") plot(f, style="w", negative.args=list(density=10), main="Negative values are hatched") } \keyword{spatial} spatstat.linnet/man/subset.lpp.Rd0000644000176200001440000001026614141460471016555 0ustar liggesusers\name{subset.lpp} \alias{subset.lpp} \title{ Subset of Point Pattern Satisfying A Condition } \description{ Given a point pattern on a linear network, return the subset of points which satisfy a specified condition. } \usage{ \method{subset}{lpp}(x, subset, select, drop=FALSE, \dots) } \arguments{ \item{x}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{subset}{ Logical expression indicating which points are to be kept. The expression may involve the names of spatial coordinates (\code{x}, \code{y}), network coordinates (\code{seg}, \code{tp}), the \code{marks}, and (if there is more than one column of marks) the names of individual columns of marks. Missing values are taken as false. See Details. } \item{select}{ Expression indicating which columns of marks should be kept. The \emph{names} of columns of marks can be used in this expression, and will be treated as if they were column indices. See Details. } \item{drop}{ Logical value indicating whether to remove unused levels of the marks, if the marks are a factor. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[base]{subset}}. It extracts the subset of points of \code{x} that satisfy the logical expression \code{subset}, and retains only the columns of marks that are specified by the expression \code{select}. The result is always a point pattern, with the same window as \code{x}. The argument \code{subset} determines the subset of points that will be extracted. It should be a logical expression. It may involve the variable names \code{x} and \code{y} representing the Cartesian coordinates; the names of other spatial coordinates or local coordinates; the name \code{marks} representing the marks; and (if there is more than one column of marks) the names of individual columns of marks. The default is to keep all points. The argument \code{select} determines which columns of marks will be retained (if there are several columns of marks). It should be an expression involving the names of columns of marks (which will be interpreted as integers representing the positions of these columns). For example if there are columns of marks named \code{A} to \code{Z}, then \code{select=D:F} is a valid expression and means that columns \code{D}, \code{E} and \code{F} will be retained. Similarly \code{select=-(A:C)} is valid and means that columns \code{A} to \code{C} will be deleted. The default is to retain all columns. Setting \code{subset=FALSE} will produce an empty point pattern (i.e. containing zero points) in the same window as \code{x}. Setting \code{select=FALSE} or \code{select= -marks} will remove all the marks from \code{x}. The argument \code{drop} determines whether to remove unused levels of a factor, if the resulting point pattern is multitype (i.e. the marks are a factor) or if the marks are a data frame in which some of the columns are factors. The result is always a point pattern, of the same class as \code{x}. 
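For instance, for a hypothetical pattern whose marks are a data frame with columns \code{A} and \code{B}, one could write (a sketch only):
\preformatted{
  X <- runiflpp(10, simplenet)
  marks(X) <- data.frame(A=rnorm(10), B=runif(10))
  Y <- subset(X, A > 0, select=B)   # keep points with A > 0, retain only column B
}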
Spatial coordinates (and local coordinates) are always retained. To extract only some columns of marks or coordinates as a data frame, use \code{subset(as.data.frame(x), ...)} } \section{Other kinds of subset arguments}{ Alternatively the argument \code{subset} can be any kind of subset index acceptable to \code{\link{[.lpp}}. This argument selects which points of \code{x} will be retained. \bold{Warning:} if the argument \code{subset} is a window, this is interpreted as specifying the subset of points that fall inside that window, but the resulting point pattern has the same window as the original pattern \code{x}. } \value{ A point pattern of the same class as \code{x}, in the same spatial window as \code{x}. The result is a subset of \code{x}, possibly with some columns of marks removed. } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.geom]{subset.ppp}}, \code{\link{[.lpp}}. } \examples{ v <- subset(chicago, x + y > 1100 & marks == "assault") vv <- subset(chicago, x + y > 1100 & marks == "assault", drop=TRUE) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/affine.linnet.Rd0000644000176200001440000000505514141460471017176 0ustar liggesusers\name{affine.linnet} \alias{affine.linnet} \alias{shift.linnet} \alias{rotate.linnet} \alias{rescale.linnet} \alias{scalardilate.linnet} \title{Apply Geometrical Transformations to a Linear Network} \description{ Apply geometrical transformations to a linear network. } \usage{ \method{affine}{linnet}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) \method{shift}{linnet}(X, vec=c(0,0), \dots, origin=NULL) \method{rotate}{linnet}(X, angle=pi/2, \dots, centre=NULL) \method{scalardilate}{linnet}(X, f, \dots) \method{rescale}{linnet}(X, s, unitname) } \arguments{ \item{X}{Linear network (object of class \code{"linnet"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{angle}{Rotation angle in radians.} \item{f}{Scalar dilation factor.} \item{s}{ Unit conversion factor: the new units are \code{s} times the old units. } \item{\dots}{ Arguments passed to other methods. } \item{origin}{ Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } \item{centre}{ Centre of rotation. Either a vector of length 2, or a character string (partially matched to \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}). The default is the coordinate origin \code{c(0,0)}. } \item{unitname}{ Optional. New name for the unit of length. A value acceptable to the function \code{\link{unitname<-}} } } \value{ Another linear network (of class \code{"linnet"}) representing the result of applying the geometrical transformation. } \details{ These functions are methods for the generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{rescale}} and \code{\link{scalardilate}} applicable to objects of class \code{"linnet"}. All of these functions perform geometrical transformations on the object \code{X}, except for \code{rescale}, which simply rescales the units of length. } \seealso{ \code{\link{linnet}} and \code{\link{as.linnet}}. Generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{scalardilate}}, \code{\link{rescale}}. 
} \examples{ U <- rotate(simplenet, pi) stretch <- diag(c(2,3)) Y <- affine(simplenet, mat=stretch) shear <- matrix(c(1,0,0.6,1),ncol=2, nrow=2) Z <- affine(simplenet, mat=shear, vec=c(0, 1)) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat.linnet/man/spatstat.linnet-internal.Rd0000644000176200001440000001146014144334010021410 0ustar liggesusers\name{spatstat.linnet-internal} \title{Internal spatstat.linnet functions} %% Linear networks code is corralled here \alias{ApplyConnected} \alias{DoCountCrossEnds} \alias{DoCountEnds} \alias{FDMKERNEL} \alias{as.linfun.linfun} \alias{as.owin.lintess} \alias{default.linnet.tolerance} \alias{makeLinnetTolerance} \alias{print.lintess} \alias{print.summary.linim} \alias{print.summary.linnet} \alias{print.summary.lintess} \alias{resolve.heat.steps} \alias{summary.lintess} \alias{nobjects.lintess} \alias{Window.lintess} \alias{Window<-.linnet} \alias{Window<-.lpp} \alias{densitypointsLPP} \alias{evalCovar.lppm} \alias{flatdensityfunlpp} \alias{flatdensityatpointslpp} \alias{getlambda.lpp} \alias{local2lpp} \alias{looHeatLPP} \alias{looVoronoiLPP} \alias{validate.lpp.coords} \alias{as.ppm.lppm} \alias{pointsAlongNetwork} \alias{linearKengine} \alias{linearKmulti} \alias{linearKmulti.inhom} \alias{linearKmultiEngine} \alias{linearpcfengine} \alias{linearpcfmulti} \alias{linearpcfmulti.inhom} \alias{linearPCFmultiEngine} \alias{resampleNetworkDataFrame} \alias{sortalongsegment} \alias{vnnFind} \alias{ldtEngine} \alias{qkdeEngine} \alias{Math.linimlist} \alias{Ops.linimlist} \alias{Summary.linimlist} \alias{Complex.linimlist} \alias{LinimOp} \alias{LinimListOp} %%%%%%% \description{ Internal spatstat.linnet functions. } \usage{ %% Linear networks code is corralled here ApplyConnected(X, Engine, r, \dots, rule, auxdata) DoCountEnds(X, D, toler) DoCountCrossEnds(X, I, J, DIJ, toler) FDMKERNEL(lppobj, dtt, dtx, M, nsave, weights, stepnames, setuponly, verbose) \method{as.linfun}{linfun}(X, \dots) \method{as.owin}{lintess}(W, \dots) default.linnet.tolerance(L) makeLinnetTolerance(toler) \method{print}{lintess}(x, \dots) \method{print}{summary.linim}(x, \dots) \method{print}{summary.linnet}(x, \dots) \method{print}{summary.lintess}(x, \dots) \method{summary}{lintess}(object, \dots) \method{nobjects}{lintess}(x) \method{Window}{lintess}(X, \dots) \method{Window}{linnet}(X, \dots, check=TRUE) <- value \method{Window}{lpp}(X, \dots, check=TRUE) <- value densitypointsLPP(x, sigma, \dots, weights, nsigma, leaveoneout, fast, fastmethod, floored, dx, dt, iterMax, verbose, debug) \method{evalCovar}{lppm}(model, covariate, \dots, lambdatype, eps, dimyx, xy, delta, nd, interpolate, jitter, jitterfactor, modelname, covname, dataname, subset) flatdensityfunlpp(X, \dots, disconnect, weights, what) flatdensityatpointslpp(X, \dots, leaveoneout, disconnect, weights, what) getlambda.lpp(lambda, X, subset, \dots, update, leaveoneout, loo.given, lambdaname) local2lpp(L, seg, tp, X, df.only) looHeatLPP(U0, Amatrix, npts, niter, nsave, lixelweight, lixelmap, verbose) looVoronoiLPP(X) validate.lpp.coords(X, fatal, context) \method{as.ppm}{lppm}(object) pointsAlongNetwork(L, delta) linearKengine(X, \dots, r, reweight, denom, correction, ratio, showworking) linearKmulti(X, I, J, r, \dots, correction) linearKmulti.inhom(X, I, J, lambdaI, lambdaJ, r, \dots, correction, normalise) linearpcfengine(X, \dots, r, reweight, denom, correction, ratio) linearpcfmulti(X, I, J, r, \dots, correction) linearpcfmulti.inhom(X, I, J, lambdaI, lambdaJ, r, \dots, correction, 
normalise) linearKmultiEngine(X, I, J, \dots, r, reweight, denom, correction, showworking) linearPCFmultiEngine(X, I, J, \dots, r, reweight, denom, correction, showworking) resampleNetworkDataFrame(df, template) sortalongsegment(df) vnnFind(seg, tp, ns, nv, from, to, seglen, huge, tol, kmax) ldtEngine(nv, ns, from, to, seglen, huge, coUXord, vnndist, vnnwhich, vnnlab) resolve.heat.steps(sigma, \dots, dx, dt, niter, iterMax, nsave, seglengths, maxdegree, AMbound, L, finespacing, fineNsplit, fineNlixels, W, eps, dimyx, xy, allow.adjust, warn.adjust, verbose, stepnames) qkdeEngine(X, sigma, \dots, at, what, leaveoneout, diggle, raw, edge2D, edge, weights, varcov, positive, shortcut, precomputed, savecomputed) \special{Math(x, \dots)} \special{Ops(e1, e2)} \special{Complex(z)} \special{Summary(\dots, na.rm = FALSE)} %NAMESPACE S3method("Math", "linimlist") %NAMESPACE S3method("Ops", "linimlist") %NAMESPACE S3method("Complex", "linimlist") %NAMESPACE S3method("Summary", "linimlist") LinimOp(e1, e2, op) LinimListOp(e1, e2, op) } \details{ These internal \pkg{spatstat.linnet} functions should not be called directly by the user. Their names and capabilities may change without warning from one version of \pkg{spatstat.linnet} to the next. } \value{ The return values of these functions are not documented, and may change without warning. } \keyword{internal} spatstat.linnet/man/eval.linim.Rd0000644000176200001440000000572314141460471016516 0ustar liggesusers\name{eval.linim} \alias{eval.linim} \title{Evaluate Expression Involving Pixel Images on Linear Network} \description{ Evaluates any expression involving one or more pixel images on a linear network, and returns a pixel image on the same linear network. } \usage{ eval.linim(expr, envir, harmonize=TRUE, warn=TRUE) } \arguments{ \item{expr}{An expression in the \R language, involving the names of objects of class \code{"linim"}.} \item{envir}{Optional. The environment in which to evaluate the expression.} \item{harmonize}{ Logical. Whether to resolve inconsistencies between the pixel grids. } \item{warn}{ Logical. Whether to issue a warning if the pixel grids were inconsistent. } } \details{ This function is a wrapper to make it easier to perform pixel-by-pixel calculations. It is one of several functions whose names begin with \code{eval} which work on objects of different types. This particular function is designed to work with objects of class \code{"linim"} which represent pixel images on a linear network. Suppose \code{X} is a pixel image on a linear network (object of class \code{"linim"}). Then \code{eval.linim(X+3)} will add 3 to the value of every pixel in \code{X}, and return the resulting pixel image on the same linear network. Suppose \code{X} and \code{Y} are two pixel images on the same linear network, with compatible pixel dimensions. Then \code{eval.linim(X + Y)} will add the corresponding pixel values in \code{X} and \code{Y}, and return the resulting pixel image on the same linear network. In general, \code{expr} can be any expression in the \R language involving (a) the \emph{names} of pixel images, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.linim} determines which of the \emph{variable names} in the expression \code{expr} refer to pixel images. Each such name is replaced by a matrix containing the pixel values. The expression is then evaluated. The result should be a matrix; it is taken as the matrix of pixel values. The expression \code{expr} must be vectorised.
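For example, continuing the notation above where \code{X} is a pixel image on a network, a small sketch of such a calculation (using the vectorised function \code{pmax}) is:
\preformatted{
  W <- eval.linim(pmax(X, 0))   # replace negative pixel values by zero
}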
There must be at least one linear pixel image in the expression. All images must have compatible dimensions. If \code{harmonize=FALSE}, images that are incompatible will cause an error. If \code{harmonize=TRUE}, images that have incompatible dimensions will be resampled so that they are compatible; if \code{warn=TRUE}, a warning will be issued. } \value{ An image object of class \code{"linim"}. } \seealso{ \code{\link{eval.im}}, \code{\link{linim}} } \examples{ M <- as.mask.psp(as.psp(simplenet)) Z <- as.im(function(x,y) {x-y}, W=M) X <- linim(simplenet, Z) X Y <- linfun(function(x,y,seg,tp){y^2+x}, simplenet) Y <- as.linim(Y) eval.linim(X + 3) eval.linim(X - Y) eval.linim(abs(X - Y)) Z <- eval.linim(sin(X * pi) + Y) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat.linnet/man/lpp.Rd0000644000176200001440000000727314141460471015255 0ustar liggesusers\name{lpp} \alias{lpp} \title{ Create Point Pattern on Linear Network } \description{ Creates an object of class \code{"lpp"} that represents a point pattern on a linear network. } \usage{ lpp(X, L, \dots) } \arguments{ \item{X}{ Locations of the points. A matrix or data frame of coordinates, or a point pattern object (of class \code{"ppp"}) or other data acceptable to \code{\link{as.ppp}}. } \item{L}{ Linear network (object of class \code{"linnet"}). } \item{\dots}{ Ignored. } } \details{ This command creates an object of class \code{"lpp"} that represents a point pattern on a linear network. Normally \code{X} is a point pattern. The points of \code{X} should lie on the lines of \code{L}. Alternatively \code{X} may be a matrix or data frame containing at least two columns. \itemize{ \item Usually the first two columns of \code{X} will be interpreted as spatial coordinates, and any remaining columns as marks. \item An exception occurs if \code{X} is a data frame with columns named \code{x}, \code{y}, \code{seg} and \code{tp}. Then \code{x} and \code{y} will be interpreted as spatial coordinates, and \code{seg} and \code{tp} as local coordinates, with \code{seg} indicating which line segment of \code{L} the point lies on, and \code{tp} indicating how far along the segment the point lies (normalised to 1). Any remaining columns will be interpreted as marks. \item Another exception occurs if \code{X} is a data frame with columns named \code{seg} and \code{tp}. Then \code{seg} and \code{tp} will be interpreted as local coordinates, as above, and the spatial coordinates \code{x,y} will be computed from them. Any remaining columns will be interpreted as marks. } If \code{X} is missing or \code{NULL}, the result is an empty point pattern (i.e. containing no points). } \section{Note on changed format}{ The internal format of \code{"lpp"} objects was changed in \pkg{spatstat} version \code{1.28-0}. Objects in the old format are still handled correctly, but computations are faster in the new format. To convert an object \code{X} from the old format to the new format, use \code{X <- lpp(as.ppp(X), as.linnet(X))}. } \value{ An object of class \code{"lpp"}. Also inherits the class \code{"ppx"}. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ Installed datasets which are \code{"lpp"} objects: \code{\link[spatstat.data]{chicago}}, \code{\link[spatstat.data]{dendrite}}, \code{\link[spatstat.data]{spiders}}. See \code{\link{as.lpp}} for converting data to an \code{lpp} object. See \code{\link{methods.lpp}} and \code{\link{methods.ppx}} for other methods applicable to \code{lpp} objects. 
Calculations on an \code{lpp} object: \code{\link{intensity.lpp}}, \code{\link{distfun.lpp}}, \code{\link{nndist.lpp}}, \code{\link{nnwhich.lpp}}, \code{\link{nncross.lpp}}, \code{\link{nnfun.lpp}}. Summary functions: \code{\link{linearK}}, \code{\link{linearKinhom}}, \code{\link{linearpcf}}, \code{\link{linearKdot}}, \code{\link{linearKcross}}, \code{\link{linearmarkconnect}}, etc. Random point patterns on a linear network can be generated by \code{\link{rpoislpp}} or \code{\link{runiflpp}}. See \code{\link{linnet}} for linear networks. } \examples{ # letter 'A' v <- ppp(x=(-2):2, y=3*c(0,1,2,1,0), c(-3,3), c(-1,7)) edg <- cbind(1:4, 2:5) edg <- rbind(edg, c(2,4)) letterA <- linnet(v, edges=edg) # points on letter A xx <- list(x=c(-1.5,0,0.5,1.5), y=c(1.5,3,4.5,1.5)) X <- lpp(xx, letterA) plot(X) X summary(X) # empty pattern lpp(L=letterA) } \keyword{spatial} spatstat.linnet/man/rSwitzerlpp.Rd0000644000176200001440000000504714141460471017024 0ustar liggesusers\name{rSwitzerlpp} \alias{rSwitzerlpp} \title{ Switzer-type Point Process on Linear Network } \description{ Generate a realisation of the Switzer-type point process on a linear network. } \usage{ rSwitzerlpp(L, lambdacut, rintens = rexp, \dots, cuts=c("points", "lines")) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). } \item{lambdacut}{ Intensity of Poisson process of breakpoints. } \item{rintens}{ Optional. Random variable generator used to generate the random intensity in each component. } \item{\dots}{ Additional arguments to \code{rintens}. } \item{cuts}{ String (partially matched) specifying the type of random cuts to be generated. } } \details{ This function generates simulated realisations of the Switzer-type point process on a network, as described in Baddeley et al (2017). The linear network is first divided into pieces by a random mechanism: \itemize{ \item if \code{cuts="points"}, a Poisson process of breakpoints with intensity \code{lambdacut} is generated on the network, and these breakpoints separate the network into connected pieces. \item if \code{cuts="lines"}, a Poisson line process in the plane with intensity \code{lambdacut} is generated; these lines divide space into tiles; the network is divided into subsets associated with the tiles. Each subset may not be a connected sub-network. } In each piece of the network, a random intensity is generated using the random variable generator \code{rintens} (the default is a negative exponential random variable with rate 1). Given the intensity value, a Poisson process is generated with the specified intensity. The intensity of the final process is determined by the mean of the values generated by \code{rintens}. If \code{rintens=rexp} (the default), then the parameter \code{rate} specifies the inverse of the intensity. } \value{ Point pattern on a linear network (object of class \code{"lpp"}) with an attribute \code{"breaks"} containing the breakpoints (if \code{cuts="points"}) or the random lines (if \code{cuts="lines"}). } \author{ \adrian. } \seealso{ \code{\link{rcelllpp}} } \references{ Baddeley, A., Nair, G., Rakshit, S. and McSwiggan, G. (2017) \sQuote{Stationary} point processes are uncommon on linear networks. \emph{STAT} \bold{6}, {68--78}. 
} \examples{ plot(rSwitzerlpp(domain(spiders), 0.01, rate=100)) plot(rSwitzerlpp(domain(spiders), 0.0005, rate=100, cuts="l")) } \keyword{spatial} \keyword{datagen} spatstat.linnet/man/Window.lpp.Rd0000644000176200001440000000172114141460471016513 0ustar liggesusers\name{Window.lpp} \alias{Window.lpp} \alias{Window.lppm} \title{Extract Window of Spatial Object on a Network} \description{ Given a spatial object on a network, these functions extract the window in which the network is defined. } \usage{ \method{Window}{lpp}(X, \dots) \method{Window}{lppm}(X, \dots) } \arguments{ \item{X}{A spatial object.} \item{\dots}{Ignored.} } \value{ An object of class \code{"owin"} (see \code{\link[spatstat.geom]{owin.object}}) specifying an observation window. } \details{ These are methods for the generic function \code{\link[spatstat.geom]{Window}} which extract the spatial window in which the object \code{X} is defined. For the methods defined here, \code{X} should be a spatial object on a linear network (object of class \code{"lpp"} or \code{"lppm"}). } \seealso{ \code{\link[spatstat.geom]{Window}}. } \examples{ Window(spiders) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat.linnet/man/intensity.lpp.Rd0000644000176200001440000000203714141460471017273 0ustar liggesusers\name{intensity.lpp} \alias{intensity.lpp} \title{ Empirical Intensity of Point Pattern on Linear Network } \description{ Computes the average number of points per unit length in a point pattern on a linear network. } \usage{ \method{intensity}{lpp}(X, ...) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{intensity}}. It computes the empirical intensity of a point pattern on a linear network (object of class \code{"lpp"}), i.e. the average density of points per unit length. If the point pattern is multitype, the intensities of the different types are computed separately. } \value{ A numeric value (giving the intensity) or numeric vector (giving the intensity for each possible type). } \seealso{ \code{\link{intensity}}, \code{\link{intensity.ppp}} } \examples{ intensity(chicago) } \author{\adrian and \rolf } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/model.images.lppm.Rd0000644000176200001440000000641114141460471017766 0ustar liggesusers\name{model.images.lppm} \alias{model.images.lppm} \title{Compute Images of Constructed Covariates} \description{ For a point process model fitted to spatial point pattern data on a linear network, this function computes pixel images of the covariates in the design matrix. } \usage{ \method{model.images}{lppm}(object, L = as.linnet(object), ...) } \arguments{ \item{object}{ Fitted point process model on a linear network. An object of class \code{"lppm"}. } \item{L}{ A linear network (object of class \code{"linnet"}) in which the images should be computed. Defaults to the network in which the model was fitted. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link[stats:model.matrix]{model.matrix.lm}}. } } \details{ This command is similar to \code{\link{model.matrix.lppm}} except that it computes pixel images of the covariates, instead of computing the covariate values at certain points only. The \code{object} must be a fitted spatial point process model on a linear network (object of class \code{"lppm"} produced by the model-fitting function \code{\link{lppm}}).
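For example (a small sketch; the model formula is purely illustrative):
\preformatted{
  fit <- lppm(chicago ~ marks + x)
  M <- model.images(fit)   # images of the canonical covariates
}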
The spatial covariates required by the model-fitting procedure are computed at every location on the network \code{L}. Note that the spatial covariates computed here are not necessarily the original covariates that were supplied when fitting the model. Rather, they are the canonical covariates, the covariates that appear in the loglinear representation of the (conditional) intensity and in the columns of the design matrix. For example, they might include dummy or indicator variables for different levels of a factor, depending on the contrasts that are in force. The format of the result depends on whether the original point pattern data were marked or unmarked. \itemize{ \item If the original dataset was unmarked, the result is a named list of pixel images on the network (objects of class \code{"linim"}) containing the values of the spatial covariates. The names of the list elements are the names of the covariates determined by \code{\link[stats:model.matrix]{model.matrix.lm}}. The result is also of class \code{"solist"} so that it can be plotted immediately. \item If the original dataset was a multitype point pattern, the result is a \code{\link[spatstat.geom]{hyperframe}} with one column for each possible type of points. Each column is a named list of pixel images on the network (objects of class \code{"linim"}) containing the values of the spatial covariates. The row names of the hyperframe are the names of the covariates determined by \code{\link[stats:model.matrix]{model.matrix.lm}}. } The pixel resolution is determined by the arguments \code{\dots} and \code{\link[spatstat.geom]{spatstat.options}}. } \value{ A list (of class \code{"solist"}) or array (of class \code{"hyperframe"}) containing pixel images on the network (objects of class \code{"linim"}). } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.core]{model.matrix.ppm}}, \code{\link[stats]{model.matrix}}, \code{\link{lppm}}. } \examples{ fit <- lppm(spiders ~ x + polynom(y, 2)) model.images(fit) } \keyword{spatial} \keyword{models} spatstat.linnet/man/plot.lpp.Rd0000644000176200001440000000617614141460471016233 0ustar liggesusers\name{plot.lpp} \alias{plot.lpp} \title{ Plot Point Pattern on Linear Network } \description{ Plots a point pattern on a linear network. Plot method for the class \code{"lpp"} of point patterns on a linear network. } \usage{ \method{plot}{lpp}(x, \dots, main, add = FALSE, use.marks=TRUE, which.marks=NULL, show.all = !add, show.window=FALSE, show.network=TRUE, do.plot = TRUE, multiplot=TRUE) } \arguments{ \item{x}{ Point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Additional arguments passed to \code{\link{plot.linnet}} or \code{\link{plot.ppp}}. } \item{main}{ Main title for plot. } \item{add}{ Logical value indicating whether the plot is to be added to the existing plot (\code{add=TRUE}) or whether a new plot should be initialised (\code{add=FALSE}, the default). } \item{use.marks}{ logical flag; if \code{TRUE}, plot points using a different plotting symbol for each mark; if \code{FALSE}, only the locations of the points will be plotted, using \code{\link{points}()}. } \item{which.marks}{ Index determining which column of marks to use, if the marks of \code{x} are a data frame. A character or integer vector identifying one or more columns of marks. If \code{add=FALSE} then the default is to plot all columns of marks, in a series of separate plots. 
If \code{add=TRUE} then only one column of marks can be plotted, and the default is \code{which.marks=1} indicating the first column of marks. } \item{show.all}{ Logical value indicating whether to plot everything including the main title and the window containing the network. } \item{show.window}{ Logical value indicating whether to plot the window containing the network. Overrides \code{show.all}. } \item{show.network}{ Logical value indicating whether to plot the network. } \item{do.plot}{ Logical value determining whether to actually perform the plotting. } \item{multiplot}{ Logical value giving permission to display multiple plots. } } \details{ The linear network is plotted by \code{\link{plot.linnet}}, then the points are plotted by \code{\link{plot.ppp}}. Commonly-used arguments include: \itemize{ \item \code{col} and \code{lwd} for the colour and width of lines in the linear network \item \code{cols} for the colour or colours of the points \item \code{chars} for the plot characters representing different types of points \item \code{legend} and \code{leg.side} to control the graphics legend } Note that the linear network will be plotted even when \code{add=TRUE}, unless \code{show.network=FALSE}. } \value{ (Invisible) object of class \code{"symbolmap"} giving the correspondence between mark values and plotting characters. } \seealso{ \code{\link{lpp}}. See \code{\link{plot.ppp}} for options for representing the points. See also \code{\link{points.lpp}}, \code{\link{text.lpp}}. } \examples{ plot(chicago, cols=1:6) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{hplot} spatstat.linnet/man/domain.lpp.Rd0000644000176200001440000000314714141460471016517 0ustar liggesusers\name{domain.lpp} \alias{domain.lpp} \alias{domain.lppm} \alias{domain.linfun} \alias{domain.lintess} \title{ Extract the Linear Network on which Spatial Data are Defined } \description{ Given a spatial object representing data on a linear network, extract the network. } \usage{ \method{domain}{lpp}(X, \dots) \method{domain}{lppm}(X, \dots) \method{domain}{linfun}(X, \dots) \method{domain}{lintess}(X, \dots) } \arguments{ \item{X}{ A spatial object representing data on a linear network. An object of class \code{"lpp"}, \code{"lppm"}, \code{"linfun"} or \code{"lintess"}. } \item{\dots}{ Extra arguments. They are ignored by all the methods listed here. } } \details{ The function \code{\link[spatstat.core]{domain}} is generic, with methods for many classes. For a spatial object \code{X} \code{domain(X)} extracts the spatial domain in which \code{X} is defined. For a two-dimensional object \code{X}, typically \code{domain(X)} is the same as \code{Window(X)}. The exception is that, if \code{X} is a point pattern on a linear network (class \code{"lpp"}) or a point process model on a linear network (class \code{"lppm"}), then \code{domain(X)} is the linear network on which the points lie, while \code{Window(X)} is the two-dimensional window containing the linear network. } \value{ A linear network (object of class \code{"linnet"}). } \author{ \spatstatAuthors. 
} \seealso{ \code{\link[spatstat.core]{domain}}, \code{\link[spatstat.geom]{Window}}, \code{\link[spatstat.geom]{Frame}} } \examples{ domain(chicago) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/tilenames.lintess.Rd0000644000176200001440000000171714141460471020120 0ustar liggesusers\name{tilenames.lintess} \alias{tilenames.lintess} \alias{tilenames<-.lintess} \title{Names of Tiles in a Tessellation on a Network} \description{ Extract or Change the Names of the Tiles in a Tessellation on a Network. } \usage{ \method{tilenames}{lintess}(x) \method{tilenames}{lintess}(x) <- value } \arguments{ \item{x}{ A tessellation on a linear network (object of class \code{"lintess"}). } \item{value}{Character vector giving new names for the tiles.} } \details{ These functions extract or change the names of the tiles that make up the tessellation \code{x}. If the tessellation is a regular grid, the tile names cannot be changed. } \value{ \code{tilenames} returns a character vector. } \seealso{ \code{\link{lintess}}, \code{\link[spatstat.geom]{tiles}} } \examples{ B <- lineardirichlet(runiflpp(5, simplenet)) tilenames(B) tilenames(B) <- letters[1:5] } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat.linnet/man/linearmarkconnect.Rd0000644000176200001440000000560214141460471020153 0ustar liggesusers\name{linearmarkconnect} \alias{linearmarkconnect} \title{ Mark Connection Function for Multitype Point Pattern on Linear Network } \description{ For a multitype point pattern on a linear network, estimate the mark connection function from points of type \eqn{i} to points of type \eqn{j}. } \usage{ linearmarkconnect(X, i, j, r=NULL, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the mark connection function \eqn{p_{ij}(r)}{p[ij](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{\dots}{ Arguments passed to \code{\link{linearpcfcross}} and \code{\link{linearpcf}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{markconnect}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (2014) Multitype point process analysis of spines on the dendrite network of a neuron. 
\emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfcross}}, \code{\link{linearpcf}}, \code{\link{linearmarkequal}}, \code{\link{markconnect}}. } \examples{ pab <- linearmarkconnect(chicago, "assault", "burglary") # plot(alltypes(chicago, linearmarkconnect)) } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/runiflpp.Rd0000644000176200001440000000272114141460471016312 0ustar liggesusers\name{runiflpp} \alias{runiflpp} \title{ Uniform Random Points on a Linear Network } \description{ Generates \eqn{n} random points, independently and uniformly distributed, on a linear network. } \usage{ runiflpp(n, L, nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of random points to generate. A nonnegative integer, or a vector of integers specifying the number of points of each type. } \item{L}{ A linear network (object of class \code{"linnet"}, see \code{\link{linnet}}). } \item{nsim}{Number of simulated realisations to generate.} \item{drop}{ Logical value indicating what to do when \code{nsim=1}. If \code{drop=TRUE} (the default), the result is a point pattern. If \code{drop=FALSE}, the result is a list with one entry which is a point pattern. } } \details{ This function uses \code{\link{runifpointOnLines}} to generate the random points. } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern on the linear network, i.e.\ an object of class \code{"lpp"}. Otherwise, a list of such point patterns. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link{rlpp}} for non-uniform random points; \code{\link{rpoislpp}} for Poisson point process; \code{\link{lpp}}, \code{\link{linnet}} } \examples{ data(simplenet) X <- runiflpp(10, simplenet) plot(X) # marked Z <- runiflpp(c(a=10, b=3), simplenet) } \keyword{spatial} \keyword{datagen} spatstat.linnet/man/distfun.lpp.Rd0000644000176200001440000000476514141460471016733 0ustar liggesusers\name{distfun.lpp} \Rdversion{1.1} \alias{distfun.lpp} \title{ Distance Map on Linear Network } \description{ Compute the distance function of a point pattern on a linear network. } \usage{ \method{distfun}{lpp}(X, ..., k=1) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{k}{ An integer. The distance to the \code{k}th nearest point will be computed. } \item{\dots}{ Extra arguments are ignored. } } \details{ On a linear network \eqn{L}, the \dQuote{geodesic distance function} of a set of points \eqn{A} in \eqn{L} is the mathematical function \eqn{f} such that, for any location \eqn{s} on \eqn{L}, the function value \code{f(s)} is the shortest-path distance from \eqn{s} to \eqn{A}. The command \code{distfun.lpp} is a method for the generic command \code{\link{distfun}} for the class \code{"lpp"} of point patterns on a linear network. If \code{X} is a point pattern on a linear network, \code{f <- distfun(X)} returns a \emph{function} in the \R language that represents the distance function of \code{X}. Evaluating the function \code{f} in the form \code{v <- f(x,y)}, where \code{x} and \code{y} are any numeric vectors of equal length containing coordinates of spatial locations, yields the values of the distance function at these locations. 
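For example, a minimal sketch using the built-in network \code{simplenet} (the evaluation coordinates are taken from a second point pattern so that they lie on the network):
\preformatted{
  X <- runiflpp(3, simplenet)
  f <- distfun(X)
  Y <- runiflpp(2, simplenet)
  v <- f(coords(Y)$x, coords(Y)$y)  # distance from each location of Y to the nearest point of X
}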
More efficiently \code{f} can be called in the form \code{v <- f(x, y, seg, tp)} where \code{seg} and \code{tp} are the local coordinates on the network. It can also be called as \code{v <- f(x)} where \code{x} is a point pattern on the same linear network. The function \code{f} obtained from \code{f <- distfun(X)} also belongs to the class \code{"linfun"}. It can be printed and plotted immediately as shown in the Examples. It can be converted to a pixel image using \code{\link{as.linim}}. } \value{ A \code{function} with arguments \code{x,y} and optional arguments \code{seg,tp}. It also belongs to the class \code{"linfun"} which has methods for \code{plot}, \code{print} etc. } \seealso{ \code{\link{linfun}}, \code{\link{methods.linfun}}. To identify \emph{which} point is the nearest neighbour, see \code{\link{nnfun.lpp}}. } \examples{ X <- runiflpp(3, simplenet) f <- distfun(X) f plot(f) # using a distfun as a covariate in a point process model: Y <- runiflpp(4, simplenet) fit <- lppm(Y ~D, covariates=list(D=f)) f(Y) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat.linnet/man/clickjoin.Rd0000644000176200001440000000432514141460471016422 0ustar liggesusers\name{clickjoin} \alias{clickjoin} \title{ Interactively join vertices on a plot } \description{ Given a point pattern representing a set of vertices, this command gives a point-and-click interface allowing the user to join pairs of selected vertices by edges. } \usage{ clickjoin(X, \dots, add = TRUE, m = NULL, join = TRUE) } \arguments{ \item{X}{ Point pattern of vertices. An object of class \code{"ppp"}. } \item{\dots}{ Arguments passed to \code{\link{segments}} to control the plotting of the new edges. } \item{add}{ Logical. Whether the point pattern \code{X} should be added to the existing plot (\code{add=TRUE}) or a new plot should be created (\code{add=FALSE}). } \item{m}{ Optional. Logical matrix specifying an initial set of edges. There is an edge between vertices \code{i} and \code{j} if \code{m[i,j] = TRUE}. } \item{join}{ Optional. If \code{TRUE}, then each user click will join a pair of vertices. If \code{FALSE}, then each user click will delete an existing edge. This is only relevant if \code{m} is supplied. } } \details{ This function makes it easier for the user to create a linear network or a planar graph, given a set of vertices. The function first displays the point pattern \code{X}, then repeatedly prompts the user to click on a pair of points in \code{X}. Each selected pair of points will be joined by an edge. The function returns a logical matrix which has entries equal to \code{TRUE} for each pair of vertices joined by an edge. The selection of points is performed using \code{\link{identify.ppp}} which typically expects the user to click the left mouse button. This point-and-click interaction continues until the user terminates it, by pressing the middle mouse button, or pressing the right mouse button and selecting \code{stop}. The return value can be used in \code{\link{linnet}} to create a linear network. } \value{ Logical matrix \code{m} with value \code{m[i,j] = TRUE} for every pair of vertices \code{X[i]} and \code{X[j]} that should be joined by an edge. } \author{ \adrian. 
} \seealso{ \code{\link{linnet}}, \code{\link{clickppp}} } \keyword{spatial} \keyword{datagen} spatstat.linnet/man/marks.lintess.Rd0000644000176200001440000000447514141460471017260 0ustar liggesusers\name{marks.lintess} \alias{marks.lintess} \alias{marks<-.lintess} \alias{unmark.lintess} \title{Marks of a Tessellation on a Network} \description{ Extract or change the marks attached to the tiles of a tessellation on a linear network. } \usage{ \method{marks}{lintess}(x, \dots) \method{marks}{lintess}(x, \dots) <- value \method{unmark}{lintess}(X) } \arguments{ \item{x,X}{ Tessellation on a linear network (object of class \code{"lintess"}). } \item{\dots}{ Ignored. } \item{value}{ Vector or data frame of mark values, or \code{NULL}. } } \value{ For \code{marks(x)}, the result is a vector, factor or data frame, containing the mark values attached to the tiles of \code{x}. If there are no marks, the result is \code{NULL}. For \code{unmark(x)}, the result is the tessellation without marks. For \code{marks(x) <- value}, the result is the updated tessellation \code{x} (with the side-effect that the dataset \code{x} is updated in the current environment). } \details{ These functions extract or change the marks attached to each of the tiles in the tessellation \code{x}. They are methods for the generic functions \code{\link[spatstat.geom]{marks}}, \code{\link[spatstat.geom]{marks<-}} and \code{\link[spatstat.geom]{unmark}} for the class \code{"lintess"} of tessellations on a network. The expression \code{marks(x)} extracts the marks of \code{x}. The assignment \code{marks(x) <- value} assigns new marks to the dataset \code{x}, and updates the dataset \code{x} in the current environment. The marks can be a vector, a factor, or a data frame. For the assignment \code{marks(x) <- value}, the \code{value} should be a vector or factor of length equal to the number of tiles in \code{x}, or a data frame with as many rows as there are tiles in \code{x}. If \code{value} is a single value, or a data frame with one row, then it will be replicated so that the same marks will be attached to each tile. To remove marks, use \code{marks(x) <- NULL} or \code{unmark(x)}. } \seealso{ \code{\link{lintess}}, \code{\link[spatstat.geom]{marks}}, \code{\link[spatstat.geom]{marks<-}} } \examples{ B <- lineardirichlet(runiflpp(5, simplenet)) marks(B) <- letters[1:5] } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat.linnet/man/persp.linim.Rd0000644000176200001440000000441114141460471016711 0ustar liggesusers\name{persp.linim} \alias{persp.linim} \title{ Perspective View of Pixel Image on a Linear Network } \description{ Given a pixel image on a linear network, generate a perspective view. } \usage{ \method{persp}{linim}(x, \dots, main, grid = TRUE, ngrid = 10, col.grid = "grey", col.base = "white", neg.args=list(), warncross=FALSE) } \arguments{ \item{x}{ Pixel image on a linear network (object of class \code{"linim"}). } \item{\dots}{ Arguments passed to \code{\link[graphics]{persp.default}} to control the perspective view, or passed to \code{\link[graphics]{segments}} or \code{\link[graphics]{polygon}} to control the appearance of the vertical panes. } \item{main}{ Main title for the plot. } \item{grid}{ Logical value indicating whether to draw a rectangular grid at height zero, to assist the perception of perspective. } \item{ngrid}{ Number of grid lines to draw, if \code{grid=TRUE}. } \item{col.grid}{ Colour of grid lines, if \code{grid=TRUE}. 
} \item{col.base}{ Colour of base plane, if \code{grid=TRUE}. } \item{neg.args}{ Optional list of arguments passed to \code{\link[graphics]{polygon}} when displaying negative values of the function. } \item{warncross}{ Logical value indicating whether to issue a warning if two segments of the network cross each other (which causes difficulty for the algorithm). } } \details{ The pixel values are interpreted as the spatially-varying height of a vertical surface erected on each segment of the linear network. These surfaces are drawn in perspective view. This style of plot is often attributed to Okabe and Sugihara (2012). } \value{ (Invisibly) the perspective transformation matrix, as described in the help for \code{\link[graphics]{persp.default}}. } \author{ \adrian and Greg McSwiggan. } \seealso{ \code{\link{persp.linfun}} } \references{ Okabe, A. and Sugihara, K. (2012) \emph{Spatial Analysis Along Networks}. John Wiley and Sons, New York. } \examples{ if(interactive()) { Z <- density(chicago, 100) } else { X <- runiflpp(10, simplenet) Z <- density(X, 0.1) } persp(Z, theta=30, phi=20) } \keyword{spatial} \keyword{hplot} spatstat.linnet/man/linearmarkequal.Rd0000644000176200001440000000422314141460471017627 0ustar liggesusers\name{linearmarkequal} \alias{linearmarkequal} \title{ Mark Connection Function for Multitype Point Pattern on Linear Network } \description{ For a multitype point pattern on a linear network, estimate the mark connection function from points of type \eqn{i} to points of type \eqn{j}. } \usage{ linearmarkequal(X, r=NULL, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the mark connection function \eqn{p_{ij}(r)}{p[ij](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{\dots}{ Arguments passed to \code{\link{linearpcfcross}} and \code{\link{linearpcf}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is the mark equality function for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (2014) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \seealso{ \code{\link{linearpcfcross}}, \code{\link{linearpcf}}, \code{\link{linearmarkconnect}}, \code{\link{markconnect}}. 
} \examples{ if(interactive()) { X <- chicago } else { m <- sample(factor(c("A","B")), 20, replace=TRUE) X <- runiflpp(20, simplenet) \%mark\% m } p <- linearmarkequal(X) } \author{\adrian} \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/unstack.lpp.Rd0000644000176200001440000000363514141460471016722 0ustar liggesusers\name{unstack.lpp} \alias{unstack.lpp} \alias{unstack.lintess} \title{ Separate Multiple Columns of Marks } \description{ Given a spatial pattern on a network, with several columns of marks, take one column at a time, and return a list of spatial patterns each having only one column of marks. } \usage{ \method{unstack}{lpp}(x, \dots) \method{unstack}{lintess}(x, \dots) } \arguments{ \item{x}{ A spatial point pattern (object of class \code{"lpp"}) or a tessellation on a linear network (object of class \code{"lintess"}). } \item{\dots}{ Ignored. } } \details{ The functions defined here are methods for the generic \code{\link[utils]{unstack}}. The functions expect a spatial object \code{x} which has several columns of marks; they separate the columns, and return a list of spatial objects, each having only one column of marks. If \code{x} has several columns of marks (i.e. \code{marks(x)} is a matrix, data frame or hyperframe with several columns), then \code{y <- unstack(x)} is a list of spatial objects, each of the same kind as \code{x}. The \code{j}th entry \code{y[[j]]} is equivalent to \code{x} except that it only includes the \code{j}th column of \code{marks(x)}. If \code{x} has no marks, or has only a single column of marks, the result is a list consisting of one entry, which is \code{x}. } \value{ A list, of class \code{"solist"}, whose entries are objects of the same type as \code{x}. } \author{ \spatstatAuthors. } \seealso{ \code{\link[utils]{unstack}} \code{\link[spatstat.geom]{unstack.ppp}}, \code{\link[spatstat.core]{unstack.msr}}. See also methods for the generic \code{\link[base]{split}} such as \code{\link[spatstat.geom]{split.ppx}} which applies to \code{"lpp"} objects. } \examples{ X <- runiflpp(5, simplenet) marks(X) <- data.frame(id=1:5, code=factor(letters[1:5])) unstack(X) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/crossing.linnet.Rd0000644000176200001440000000200214141460471017562 0ustar liggesusers\name{crossing.linnet} \alias{crossing.linnet} \title{ Crossing Points between Linear Network and Other Lines } \description{ Find all the crossing-points between a linear network and another pattern of lines or line segments. } \usage{ crossing.linnet(X, Y) } \arguments{ \item{X}{ Linear network (object of class \code{"linnet"}). } \item{Y}{ A linear network, or a spatial pattern of line segments (class \code{"psp"}) or infinite lines (class \code{"infline"}). } } \details{ All crossing-points between \code{X} and \code{Y} are determined. The result is a point pattern on the network \code{X}. } \value{ Point pattern on a linear network (object of class \code{"lpp"}). } \author{ \adrian. 
} \seealso{ \code{\link{crossing.psp}} % \code{\link{chop.linnet}} } \examples{ plot(simplenet, main="") L <- infline(p=runif(3), theta=runif(3, max=pi/2)) plot(L, col="red") Y <- crossing.linnet(simplenet, L) plot(Y, add=TRUE, cols="blue") } \keyword{spatial} \keyword{manip} spatstat.linnet/man/cdf.test.lpp.Rd0000644000176200001440000002423714141460471016765 0ustar liggesusers\name{cdf.test.lpp} \alias{cdf.test.lppm} \alias{cdf.test.lpp} \title{Spatial Distribution Test for Points on a Linear Network} \description{ Performs a test of goodness-of-fit of a point process model on a linear network. The observed and predicted distributions of the values of a spatial covariate are compared using either the Kolmogorov-Smirnov test, \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or Anderson-Darling test. For non-Poisson models, a Monte Carlo test is used. } \usage{ \method{cdf.test}{lpp}(X, covariate, test=c("ks", "cvm", "ad"), \dots, interpolate=TRUE, jitter=TRUE) \method{cdf.test}{lppm}(model, covariate, test=c("ks", "cvm", "ad"), \dots, interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{model}{ A fitted point process model on a linear network (object of class \code{"lppm"}) } \item{covariate}{ The spatial covariate on which the test will be based. A function, a pixel image (object of class \code{"im"} or \code{"linim"}), a list of pixel images, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. } \item{test}{ Character string identifying the test to be performed: \code{"ks"} for Kolmogorov-Smirnov test, \code{"cvm"} for \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or \code{"ad"} for Anderson-Darling test. } \item{\dots}{ Arguments passed to \code{\link[stats]{ks.test}} (from the \pkg{stats} package) or \code{\link[goftest]{cvm.test}} or \code{\link[goftest]{ad.test}} (from the \pkg{goftest} package) to control the test. } \item{interpolate}{ Logical flag indicating whether to interpolate pixel images. If \code{interpolate=TRUE}, the value of the covariate at each point of \code{X} will be approximated by interpolating the nearby pixel values. If \code{interpolate=FALSE}, the nearest pixel value will be used. } \item{jitter}{ Logical flag. If \code{jitter=TRUE}, values of the covariate will be slightly perturbed at random, to avoid tied values in the test. } \item{nsim}{ Number of simulated realisations from the \code{model} to be used for the Monte Carlo test, when \code{model} is not a Poisson process. } \item{verbose}{ Logical value indicating whether to print progress reports when performing a Monte Carlo test. } } \details{ These functions perform a goodness-of-fit test of a Poisson point process model fitted to point pattern data on a linear network. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using the Kolmogorov-Smirnov test, the \ifelse{latex}{\out{Cram\'er}}{Cramer}-von Mises test or the Anderson-Darling test. For Gibbs models, a Monte Carlo test is performed using these test statistics. The function \code{\link[spatstat.core]{cdf.test}} is generic, with methods for point patterns (\code{"ppp"} or \code{"lpp"}), point process models (\code{"ppm"} or \code{"lppm"}) and spatial logistic regression models (\code{"slrm"}). 
See the help file for \code{\link[spatstat.core]{cdf.test}} for information on the generic function and the methods for data in two-dimensional space, classes \code{"ppp"}, \code{"ppm"} and \code{"slrm"}. This help file describes the methods for data on a linear network, classes \code{"lpp"} and \code{"lppm"}. \itemize{ \item If \code{X} is a point pattern on a linear network (object of class \code{"lpp"}), then \code{cdf.test(X, \dots)} performs a goodness-of-fit test of the uniform Poisson point process (Complete Spatial Randomness, CSR) for this dataset. For a multitype point pattern, the uniform intensity is assumed to depend on the type of point (sometimes called Complete Spatial Randomness and Independence, CSRI). \item If \code{model} is a fitted point process model on a network (object of class \code{"lppm"}) then \code{cdf.test(model, \dots)} performs a test of goodness-of-fit for this fitted model. } The test is performed by comparing the observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same covariate under the model, using a classical goodness-of-fit test. Thus, you must nominate a spatial covariate for this test. If \code{X} is a point pattern that does not have marks, the argument \code{covariate} should be either a \code{function(x,y)} or a pixel image (object of class \code{"im"} or \code{"linim"}) containing the values of a spatial function, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. If \code{X} is a multitype point pattern, the argument \code{covariate} can be either a \code{function(x,y,marks)}, or a pixel image, or a list of pixel images corresponding to each possible mark value, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. The predicted distribution of the values of the \code{covariate} under the fitted \code{model} is computed as follows. The values of the \code{covariate} at all locations in the observation window are evaluated, weighted according to the point process intensity of the fitted model, and compiled into a cumulative distribution function \eqn{F} using \code{\link{ewcdf}}. The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. A goodness-of-fit test of the uniform distribution is applied to these numbers using \code{stats::\link[stats]{ks.test}}, \code{goftest::\link[goftest]{cvm.test}} or \code{goftest::\link[goftest]{ad.test}}. This test was apparently first described (in the context of two-dimensional spatial data, and using Kolmogorov-Smirnov) by Berman (1986). See also Baddeley et al (2005).
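The probability integral transformation can be illustrated with ordinary (non-spatial) data; this is only a sketch of the general idea, not the internal code of \code{cdf.test}:
\preformatted{
  z <- rexp(100)         # values of a hypothetical covariate
  u <- pexp(z)           # transform by the true distribution function
  ks.test(u, "punif")    # uniformity should not be rejected
}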
If \code{model} is not a Poisson process, then a Monte Carlo test is performed, by generating \code{nsim} point patterns which are simulated realisations of the \code{model}, re-fitting the model to each simulated point pattern, and calculating the test statistic for each fitted model. The Monte Carlo \eqn{p} value is determined by comparing the simulated values of the test statistic with the value for the original data. The return value is an object of class \code{"htest"} containing the results of the hypothesis test. The print method for this class gives an informative summary of the test outcome. The return value also belongs to the class \code{"cdftest"} for which there is a plot method \code{\link{plot.cdftest}}. The plot method displays the empirical cumulative distribution function of the covariate at the data points, and the predicted cumulative distribution function of the covariate under the model, plotted against the value of the covariate. The argument \code{jitter} controls whether covariate values are randomly perturbed, in order to avoid ties. If the original data contains any ties in the covariate (i.e. points with equal values of the covariate), and if \code{jitter=FALSE}, then the Kolmogorov-Smirnov test implemented in \code{\link[stats]{ks.test}} will issue a warning that it cannot calculate the exact \eqn{p}-value. To avoid this, if \code{jitter=TRUE} each value of the covariate will be perturbed by adding a small random value. The perturbations are normally distributed with standard deviation equal to one hundredth of the range of values of the covariate. This prevents ties, and the \eqn{p}-value is still correct. There is a very slight loss of power. } \value{ An object of class \code{"htest"} containing the results of the test. See \code{\link[stats]{ks.test}} for details. The return value can be printed to give an informative summary of the test. The value also belongs to the class \code{"cdftest"} for which there is a plot method. } \section{Warning}{ The outcome of the test involves a small amount of random variability, because (by default) the coordinates are randomly perturbed to avoid tied values. Hence, if \code{cdf.test} is executed twice, the \eqn{p}-values will not be exactly the same. To avoid this behaviour, set \code{jitter=FALSE}. } \author{\adrian and \rolf } \seealso{ \code{\link[spatstat.core]{plot.cdftest}}, \code{\link[spatstat.core]{quadrat.test}}, \code{\link[spatstat.core]{berman.test}}, \code{\link[stats]{ks.test}}, \code{goftest::\link[goftest]{cvm.test}}, \code{goftest::\link[goftest]{ad.test}}, \code{\link{lppm}} } \references{ Baddeley, A., Turner, R., \Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. 
} \examples{ op <- options(useFancyQuotes=FALSE) # test of CSR using x coordinate cdf.test(spiders, "x") # fit inhomogeneous Poisson model and test model <- lppm(spiders ~x) cdf.test(model, "y") # test of CSR using a function of x and y fun <- function(x,y){2* x + y} cdf.test(spiders, fun) # test of CSR using an image covariate fim <- as.linim(fun, domain(spiders)) cdf.test(spiders, fim) options(op) } \keyword{htest} \keyword{spatial} spatstat.linnet/man/model.frame.lppm.Rd0000644000176200001440000000360414141460471017614 0ustar liggesusers\name{model.frame.lppm} \alias{model.frame.lppm} \title{ Extract the Variables in a Point Process Model on a Network } \description{ Given a fitted point process model on a network, this function returns a data frame containing all the variables needed to fit the model using the Berman-Turner device. } \usage{ \method{model.frame}{lppm}(formula, ...) } \arguments{ \item{formula}{ A fitted point process model on a linear network. An object of class \code{"lppm"}. } \item{\dots}{ Additional arguments passed to \code{\link{model.frame.glm}}. } } \details{ The function \code{\link[stats]{model.frame}} is generic. This function is a method for \code{\link[stats]{model.frame}} for fitted point process models on a linear network (objects of class \code{"lppm"}). The first argument should be a fitted point process model; it has to be named \code{formula} for consistency with the generic function. The result is a data frame containing all the variables used in fitting the model. The data frame has one row for each quadrature point used in fitting the model. The quadrature scheme can be extracted using \code{\link{quad.ppm}}. } \value{ A \code{data.frame} containing all the variables used in the fitted model, plus additional variables specified in \code{\dots}. It has an additional attribute \code{"terms"} containing information about the model formula. For details see \code{\link{model.frame.glm}}. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. } \seealso{ \code{\link{lppm}}, \code{\link[stats]{model.frame}}, \code{\link[spatstat.core]{model.matrix.ppm}} } \examples{ fit <- lppm(spiders ~ x) mf <- model.frame(fit) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{models} spatstat.linnet/man/linequad.Rd0000644000176200001440000000440414141460471016255 0ustar liggesusers\name{linequad} \alias{linequad} \title{ Quadrature Scheme on a Linear Network } \description{ Generates a quadrature scheme (an object of class \code{"quad"}) on a linear network. } \usage{ linequad(X, Y, \dots, eps = NULL, nd = 1000, random = FALSE) } \arguments{ \item{X}{ Data points. An object of class \code{"lpp"} or \code{"ppp"}. } \item{Y}{ Line segments on which the points of \code{X} lie. An object of class \code{"psp"}. Required only when \code{X} is a \code{"ppp"} object. } \item{\dots}{ Ignored. } \item{eps}{ Optional. Spacing between successive dummy points along each segment. (This is the maximum spacing; some spacings will be shorter.) } \item{nd}{ Optional. Total number of dummy points to be generated. (Actual number may be larger.) } \item{random}{ Logical value indicating whether the sequence of dummy points should start at a randomly-chosen position along each segment. } } \details{ This command generates a quadrature scheme (object of class \code{"quad"}) from a pattern of points on a linear network. 
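For example, a small sketch (the value of \code{nd} here is only illustrative):
\preformatted{
  X <- runiflpp(10, simplenet)
  Q <- linequad(X, nd=100)
  Q
}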
Normally the user does not need to call \code{linequad} explicitly. It is invoked by \pkg{spatstat} functions when needed. A quadrature scheme is required by \code{\link{lppm}} in order to fit point process models to point pattern data on a linear network. A quadrature scheme is also used by \code{\link{rhohat.lpp}} and other functions. In order to create the quadrature scheme, dummy points are placed along each line segment of the network. The dummy points are evenly-spaced with spacing \code{eps}. The default is \code{eps = totlen/nd} where \code{totlen} is the total length of all line segments in the network. Every line segment of the network will contain at least one dummy point. Consequently the actual number of dummy points generated will typically be greater than \code{nd}, especially when \code{nd} is small. If \code{eps} is specified, the number of dummy points will be greater than \code{totlen/eps}, especially when \code{eps} is large. } \value{ A quadrature scheme (object of class \code{"quad"}). } \author{ \adrian, Greg McSwiggan and Suman Rakshit. } \seealso{ \code{\link{lppm}} } \keyword{datagen} \keyword{spatial} spatstat.linnet/man/sdr.lpp.Rd0000644000176200001440000000756414141460471016043 0ustar liggesusers\name{sdr.lpp} \alias{sdr.lpp} \title{ Sufficient Dimension Reduction for a Point Pattern on a Linear Network } \description{ Given a point pattern on a linear network, and a set of predictors, find a minimal set of new predictors, each constructed as a linear combination of the original predictors. } \usage{ \method{sdr}{lpp}(X, covariates, method = c("DR", "NNIR", "SAVE", "SIR", "TSE"), Dim1 = 1, Dim2 = 1, predict=FALSE, \dots) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{covariates}{ A list of pixel images (objects of class \code{"im"} or \code{"linim"}) to serve as predictor variables. } \item{method}{ Character string indicating which method to use. See Details. } \item{Dim1}{ Dimension of the first order Central Intensity Subspace (applicable when \code{method} is \code{"DR"}, \code{"NNIR"}, \code{"SAVE"} or \code{"TSE"}). } \item{Dim2}{ Dimension of the second order Central Intensity Subspace (applicable when \code{method="TSE"}). } \item{predict}{ Logical value indicating whether to compute the new predictors as well. } \item{\dots}{Extra arguments are ignored.} } \details{ This is the method for \code{\link[spatstat.core]{sdr}} for the class \code{"lpp"} of point patterns on a linear network. Given a point pattern \eqn{X} and predictor variables \eqn{Z_1, \dots, Z_p}{Z[1], ..., Z[p]}, Sufficient Dimension Reduction methods (Guan and Wang, 2010) attempt to find a minimal set of new predictor variables, each constructed by taking a linear combination of the original predictors, which explain the dependence of \eqn{X} on \eqn{Z_1, \dots, Z_p}{Z[1], ..., Z[p]}. The methods do not assume any particular form of dependence of the point pattern on the predictors. The predictors are assumed to be Gaussian random fields. Available methods are: \tabular{ll}{ \code{method="DR"} \tab directional regression \cr \code{method="NNIR"} \tab nearest neighbour inverse regression \cr \code{method="SAVE"} \tab sliced average variance estimation \cr \code{method="SIR"} \tab sliced inverse regression \cr \code{method="TSE"} \tab two-step estimation \cr } The result includes a matrix \code{B} whose columns are estimates of the basis vectors of the space of new predictors.
That is, the \code{j}th column of \code{B} expresses the \code{j}th new predictor as a linear combination of the original predictors. If \code{predict=TRUE}, the new predictors are also evaluated. They can also be evaluated using \code{\link[spatstat.core]{sdrPredict}}. } \value{ A list with components \code{B, M} or \code{B, M1, M2} where \code{B} is a matrix whose columns are estimates of the basis vectors for the space, and \code{M} or \code{M1,M2} are matrices containing estimates of the kernel. If \code{predict=TRUE}, the result also includes a component \code{Y} which is a list of pixel images giving the values of the new predictors. } \examples{ # sdr(bei, bei.extra) xim <- as.linim(function(x,y) { x }, simplenet) yim <- as.linim(function(x,y) { y }, simplenet) X <- runiflpp(30, simplenet) sdr(X, list(x=xim, y=yim)) } \seealso{ \code{\link[spatstat.core]{sdrPredict}} to compute the new predictors from the coefficient matrix. \code{\link[spatstat.core]{dimhat}} to estimate the subspace dimension. \code{\link[spatstat.core]{subspaceDistance}} } \references{ Guan, Y. and Wang, H. (2010) Sufficient dimension reduction for spatial point processes directed by Gaussian random fields. \emph{Journal of the Royal Statistical Society, Series B}, \bold{72}, 367--387. } \author{ Based on a Matlab original, for two-dimensional point patterns, by Yongtao Guan. Adapted to \R, and to linear networks, by Suman Rakshit. } \keyword{spatial} \keyword{multivariate} spatstat.linnet/man/lixellate.Rd0000644000176200001440000000467614141460471016451 0ustar liggesusers\name{lixellate} \alias{lixellate} \title{ Subdivide Segments of a Network } \description{ Each line segment of a linear network will be divided into several shorter segments (line elements or lixels). } \usage{ lixellate(X, \dots, nsplit, eps, sparse = TRUE) } \arguments{ \item{X}{ A linear network (object of class \code{"linnet"}) or a point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Ignored. } \item{nsplit}{ Number of pieces into which \emph{each} line segment of \code{X} should be divided. Either a single integer, or an integer vector with one entry for each line segment in \code{X}. Incompatible with \code{eps}. } \item{eps}{ Maximum length of the resulting pieces of line segment. A single numeric value. Incompatible with \code{nsplit}. } \item{sparse}{ Optional. Logical value specifying whether the resulting linear network should be represented using a sparse matrix. If \code{sparse=NULL}, then the representation will be the same as in \code{X}. } } \details{ Each line segment in \code{X} will be subdivided into equal pieces. The result is an object of the same kind as \code{X}, representing the same data as \code{X} except that the segments have been subdivided. Splitting is controlled by the arguments \code{nsplit} and \code{eps}, exactly one of which should be given. If \code{nsplit} is given, it specifies the number of pieces into which \emph{each} line segment of \code{X} should be divided. It should be either a single integer, or an integer vector of length equal to the number of line segments in \code{X}. If \code{eps} is given, it specifies the maximum length of any resulting piece of line segment. It is strongly advisable to use \code{sparse=TRUE} (the default) to limit the computation time. 
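For example, to subdivide by a maximum piece length rather than a fixed number of pieces (the value \code{eps=0.1} is only illustrative):
\preformatted{
  B <- lixellate(simplenet, eps=0.1)
  nsegments(B)
}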
If \code{X} is a point pattern (class \code{"lpp"}) then the spatial coordinates and marks of each data point are unchanged, but the local coordinates will change, because they are adjusted to map them to the new subdivided network. } \value{ Object of the same kind as \code{X}. } \author{ Greg McSwiggan, \spatstatAuthors. } \seealso{ \code{\link{linnet}}, \code{\link{lpp}}. } \examples{ A <- lixellate(simplenet, nsplit=4) plot(A, main="lixellate(simplenet, nsplit=4)") points(vertices(A), pch=16) spiders lixellate(spiders, nsplit=3) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/rlpp.Rd0000644000176200001440000000450414141460471015431 0ustar liggesusers\name{rlpp} \alias{rlpp} \title{ Random Points on a Linear Network } \description{ Generates \eqn{n} independent random points on a linear network with a specified probability density. } \usage{ rlpp(n, f, \dots, nsim=1, drop=TRUE) } \arguments{ \item{n}{ Number of random points to generate. A nonnegative integer giving the number of points, or an integer vector giving the numbers of points of each type. } \item{f}{ Probability density (not necessarily normalised). A pixel image on a linear network (object of class \code{"linim"}) or a function on a linear network (object of class \code{"linfun"}). Alternatively, \code{f} can be a list of functions or pixel images, giving the densities of points of each type. } \item{\dots}{ Additional arguments passed to \code{f} if it is a function or a list of functions. } \item{nsim}{Number of simulated realisations to generate.} \item{drop}{ Logical value indicating what to do when \code{nsim=1}. If \code{drop=TRUE} (the default), the result is a point pattern. If \code{drop=FALSE}, the result is a list with one entry which is a point pattern. } } \details{ The linear network \code{L}, on which the points will be generated, is determined by the argument \code{f}. If \code{f} is a function, it is converted to a pixel image on the linear network, using any additional function arguments \code{\dots}. If \code{n} is a single integer and \code{f} is a function or pixel image, then independent random points are generated on \code{L} with probability density proportional to \code{f}. If \code{n} is an integer vector and \code{f} is a list of functions or pixel images, where \code{n} and \code{f} have the same length, then independent random points of several types are generated on \code{L}, with \code{n[i]} points of type \code{i} having probability density proportional to \code{f[[i]]}. } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern on the linear network, i.e.\ an object of class \code{"lpp"}. Otherwise, a list of such point patterns. } \author{ \adrian } \seealso{ \code{\link{runiflpp}} } \examples{ g <- function(x, y, seg, tp) { exp(x + 3*y) } f <- linfun(g, simplenet) rlpp(20, f) plot(rlpp(20, f, nsim=3)) } \keyword{spatial} \keyword{datagen} spatstat.linnet/man/pseudoR2.lppm.Rd0000644000176200001440000000351114141460471017123 0ustar liggesusers\name{pseudoR2.lppm} \alias{pseudoR2.lppm} \title{ Calculate Pseudo-R-Squared for Point Process Model on Linear Network } \description{ Given a fitted point process model on a linear network, calculate the pseudo-R-squared value, which measures the fraction of variation in the data that is explained by the model. } \usage{ \method{pseudoR2}{lppm}(object, \dots, keepoffset=TRUE) } \arguments{ \item{object}{ Fitted point process model on a linear network. An object of class \code{"lppm"}. 
} \item{keepoffset}{ Logical value indicating whether to retain offset terms in the model when computing the deviance difference. See Details. } \item{\dots}{ Additional arguments passed to \code{\link{deviance.lppm}}. } } \details{ The function \code{\link[spatstat.core]{pseudoR2}} is generic, with methods for fitted point process models of class \code{"ppm"} and \code{"lppm"}. This function computes McFadden's pseudo-Rsquared \deqn{ R^2 = 1 - \frac{D}{D_0} }{ R^2 = 1 - D/D0 } where \eqn{D} is the deviance of the fitted model \code{object}, and \eqn{D_0}{D0} is the deviance of the null model. Deviance is defined as twice the negative log-likelihood or log-pseudolikelihood. The null model is usually obtained by re-fitting the model using the trend formula \code{~1}. However if the original model formula included \code{offset} terms, and if \code{keepoffset=TRUE} (the default), then the null model formula consists of these offset terms. This ensures that the \code{pseudoR2} value is non-negative. } \value{ A single numeric value. } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.core]{pseudoR2}}, \code{\link{deviance.lppm}}. } \examples{ X <- rpoislpp(10, simplenet) fit <- lppm(X ~ y) pseudoR2(fit) } \keyword{spatial} \keyword{models} spatstat.linnet/man/is.stationary.lppm.Rd0000644000176200001440000000303214141460471020225 0ustar liggesusers\name{is.stationary.lppm} \alias{is.stationary.lppm} \alias{is.poisson.lppm} \title{ Recognise Stationary and Poisson Point Process Models on a Network } \description{ Given a point process model that has been fitted to data on a network, determine whether the model is a stationary point process, and whether it is a Poisson point process. } \usage{ \method{is.stationary}{lppm}(x) \method{is.poisson}{lppm}(x) } \arguments{ \item{x}{ A fitted spatial point process model on a linear network (object of class \code{"lppm"}). } } \details{ The argument \code{x} represents a fitted spatial point process model on a linear network. \code{is.stationary(x)} returns \code{TRUE} if \code{x} represents a stationary point process, and \code{FALSE} if not. \code{is.poisson(x)} returns \code{TRUE} if \code{x} represents a Poisson point process, and \code{FALSE} if not. The functions \code{\link[spatstat.core]{is.stationary}} and \code{\link[spatstat.core]{is.poisson}} are generic, with methods for many classes of models. } \value{ A logical value. } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.geom]{is.marked}} to determine whether a model is a marked point process. \code{\link[spatstat.core]{is.stationary}}, \code{\link[spatstat.core]{is.poisson}} for generics. \code{\link{summary.lppm}} for detailed information. Model-fitting function \code{\link{lppm}}. } \examples{ fit <- lppm(spiders ~ x) is.stationary(fit) is.poisson(fit) } \keyword{spatial} \keyword{models} spatstat.linnet/man/as.linnet.linim.Rd0000644000176200001440000000316114141460471017454 0ustar liggesusers\name{as.linnet.linim} \alias{as.linnet.lpp} \alias{as.linnet.linim} \alias{as.linnet.linfun} \alias{as.linnet.lintess} \title{ Extract Linear Network from Data on a Linear Network } \description{ Given some kind of data on a linear network, the command \code{as.linnet} extracts the linear network itself. } \usage{ \method{as.linnet}{linim}(X, \dots) \method{as.linnet}{linfun}(X, \dots) \method{as.linnet}{lintess}(X, \dots) \method{as.linnet}{lpp}(X, \dots, fatal=TRUE, sparse) } \arguments{ \item{X}{ Data on a linear network. 
A point pattern (class \code{"lpp"}), pixel image (class \code{"linim"}), function (class \code{"linfun"}) or tessellation (class \code{"lintess"}) on a linear network. } \item{\dots}{ Ignored. } \item{fatal}{ Logical value indicating whether data in the wrong format should lead to an error (\code{fatal=TRUE}) or a warning (\code{fatal=FALSE}). } \item{sparse}{ Logical value indicating whether to use a sparse matrix representation, as explained in \code{\link{linnet}}. Default is to keep the same representation as in \code{X}. } } \details{ These are methods for the generic \code{\link{as.linnet}} for various classes. The network on which the data are defined is extracted. } \value{ A linear network (object of class \code{"linnet"}). } \author{ \spatstatAuthors. } \seealso{ \code{\link{linnet}}, \code{\link{methods.linnet}}. } \examples{ # make some data xcoord <- linfun(function(x,y,seg,tp) { x }, simplenet) as.linnet(xcoord) X <- as.linim(xcoord) as.linnet(X) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/lintess.Rd0000644000176200001440000000636014141460471016137 0ustar liggesusers\name{lintess} \alias{lintess} \title{ Tessellation on a Linear Network } \description{ Create a tessellation on a linear network. } \usage{ lintess(L, df, marks=NULL) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). } \item{df}{ Data frame of local coordinates for the pieces that make up the tiles of the tessellation. See Details. } \item{marks}{ Vector or data frame of marks associated with the tiles of the tessellation. } } \details{ A tessellation on a linear network \code{L} is a partition of the network into non-overlapping pieces (tiles). Each tile consists of one or more line segments which are subsets of the line segments making up the network. A tile can consist of several disjoint pieces. The data frame \code{df} should have columns named \code{seg}, \code{t0}, \code{t1} and \code{tile}. Any additional columns will be ignored. Each row of the data frame specifies one sub-segment of the network and allocates it to a particular tile. The \code{seg} column specifies which line segment of the network contains the sub-segment. Values of \code{seg} are integer indices for the segments in \code{as.psp(L)}. The \code{t0} and \code{t1} columns specify the start and end points of the sub-segment. They should be numeric values between 0 and 1 inclusive, where the values 0 and 1 represent the network vertices that are joined by this network segment. The \code{tile} column specifies which tile of the tessellation includes this sub-segment. It will be coerced to a factor and its levels will be the names of the tiles. If \code{df} is missing or \code{NULL}, the result is a tessellation with only one tile, consisting of the entire network \code{L}. Additional data called \emph{marks} may be associated with each tile of the tessellation. The argument \code{marks} should be a vector with one entry for each tile (that is, one entry for each level of \code{df$tile}) or a data frame with one row for each tile. In general \code{df} and \code{marks} will have different numbers of rows. } \value{ An object of class \code{"lintess"}. There are methods for \code{print}, \code{plot} and \code{summary} for this object. } \author{ \adrian and Greg McSwiggan. } \seealso{ \code{\link{linnet}} for linear networks. \code{\link{plot.lintess}} for plotting. \code{\link{divide.linnet}} to make a tessellation demarcated by given points.
\code{\link{lineardirichlet}} to create the Dirichlet-Voronoi tessellation from a point pattern on a linear network. \code{\link{as.linfun.lintess}}, \code{\link{as.linnet.lintess}} and \code{\link{as.linim}} to convert to other classes. \code{\link{tile.lengths}} to compute the length of each tile in the tessellation. The undocumented methods \code{Window.lintess} and \code{as.owin.lintess} extract the spatial window. } \examples{ # tessellation consisting of one tile for each existing segment ns <- nsegments(simplenet) df <- data.frame(seg=1:ns, t0=0, t1=1, tile=letters[1:ns]) u <- lintess(simplenet, df) u plot(u) S <- as.psp(simplenet) marks(u) <- data.frame(len=lengths_psp(S), ang=angles.psp(S)) u plot(u) } \keyword{spatial} \keyword{datagen} spatstat.linnet/man/as.linim.Rd0000644000176200001440000000621214141460471016164 0ustar liggesusers\name{as.linim} \alias{as.linim} \alias{as.linim.linim} \alias{as.linim.linfun} \alias{as.linim.default} \title{Convert to Pixel Image on Linear Network} \description{ Converts various kinds of data to a pixel image on a linear network. } \usage{ as.linim(X, \dots) \method{as.linim}{linim}(X, \dots) \method{as.linim}{default}(X, L, \dots, eps = NULL, dimyx = NULL, xy = NULL, delta=NULL, nd=NULL) \method{as.linim}{linfun}(X, L=domain(X), \dots, eps = NULL, dimyx = NULL, xy = NULL, delta=NULL, nd=NULL) } \arguments{ \item{X}{ Data to be converted to a pixel image on a linear network. } \item{L}{ Linear network (object of class \code{"linnet"}). } \item{\dots}{Additional arguments passed to \code{X} when \code{X} is a function. } \item{eps,dimyx,xy}{ Optional arguments passed to \code{\link{as.mask}} to control the pixel resolution. } \item{delta}{ Optional. Numeric value giving the approximate distance (in coordinate units) between successive sample points along each segment of the network. } \item{nd}{ Optional. Integer giving the (approximate) number of sample points on the network. Ignored if \code{delta} is given. } } \details{ This function converts the data \code{X} into a pixel image on a linear network, an object of class \code{"linim"} (see \code{\link{linim}}). The argument \code{X} may be any of the following: \itemize{ \item a function on a linear network, an object of class \code{"linfun"}. \item a pixel image on a linear network, an object of class \code{"linim"}. \item a pixel image, an object of class \code{"im"}. \item any type of data acceptable to \code{\link{as.im}}, such as a function, numeric value, or window. } First \code{X} is converted to a pixel image object \code{Y} (object of class \code{"im"}). The conversion is performed by \code{\link{as.im}}. The arguments \code{eps}, \code{dimyx} and \code{xy} determine the pixel resolution. Next \code{Y} is converted to a pixel image on a linear network using \code{\link{linim}}. The argument \code{L} determines the linear network. If \code{L} is missing or \code{NULL}, then \code{X} should be an object of class \code{"linim"}, and \code{L} defaults to the linear network on which \code{X} is defined. In addition to converting the function to a pixel image, the algorithm also generates a fine grid of sample points evenly spaced along each segment of the network (with spacing at most \code{delta} coordinate units). The function values at these sample points are stored in the resulting object as a data frame (the argument \code{df} of \code{\link{linim}}). This mechanism allows greater accuracy for some calculations (such as \code{\link{integral.linim}}). 
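For example, a finer sampling of the network can be requested using \code{delta} (the spacing \code{0.02} is only illustrative):
\preformatted{
  Z <- as.linim(function(x,y) { x + y }, simplenet, delta=0.02)
  integral(Z)
}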
} \value{ An image object on a linear network; an object of class \code{"linim"}. } \seealso{ \code{\link{as.im}} } \examples{ f <- function(x,y){ x + y } plot(as.linim(f, simplenet)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{manip} spatstat.linnet/man/integral.linim.Rd0000644000176200001440000000327214141460471017371 0ustar liggesusers\name{integral.linim} \alias{integral.linim} \alias{integral.linfun} \title{ Integral on a Linear Network } \description{ Computes the integral (total value) of a function or pixel image over a linear network. } \usage{ \method{integral}{linim}(f, domain=NULL, ...) \method{integral}{linfun}(f, domain=NULL, ..., delta, nd) } \arguments{ \item{f}{ A pixel image on a linear network (class \code{"linim"}) or a function on a linear network (class \code{"linfun"}). } \item{domain}{ Optional window specifying the domain of integration. Alternatively a tessellation. } \item{\dots}{ Ignored. } \item{delta}{ Optional. The step length (in coordinate units) for computing the approximate integral. A single positive number. } \item{nd}{ Optional. Integer giving the approximate number of sample points on the network. } } \details{ The integral (total value of the function over the network) is calculated. If \code{domain} is a window (class \code{"owin"}) then the integration will be restricted to this window. If \code{domain} is a tessellation (class \code{"tess"}) then the integral of \code{f} in each tile of \code{domain} will be computed. } \value{ A single numeric or complex value (or a vector of such values if \code{domain} is a tessellation). } \seealso{ \code{\link{linim}}, \code{\link{integral.im}} } \examples{ # make some data xcoord <- linfun(function(x,y,seg,tp) { x }, simplenet) integral(xcoord) X <- as.linim(xcoord) integral(X) # integrals inside each tile of a tessellation A <- quadrats(Frame(simplenet), 3) integral(X, A) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat.linnet/man/as.linnet.psp.Rd0000644000176200001440000000454714141460471017157 0ustar liggesusers\name{as.linnet.psp} \alias{as.linnet.psp} \title{ Convert Line Segment Pattern to Linear Network } \description{ Converts a line segment pattern to a linear network. } \usage{ \method{as.linnet}{psp}(X, \dots, eps, sparse=FALSE) } \arguments{ \item{X}{ Line segment pattern (object of class \code{"psp"}). } \item{\dots}{ Ignored. } \item{eps}{ Optional. Distance threshold. If two segment endpoints are closer than \code{eps} units apart, they will be treated as the same point, and will become a single vertex in the linear network. } \item{sparse}{ Logical value indicating whether to use a sparse matrix representation, as explained in \code{\link{linnet}}. } } \details{ This command converts any collection of line segments into a linear network by guessing the connectivity of the network, using the distance threshold \code{eps}. If any segments in \code{X} cross over each other, they are first cut into pieces using \code{\link{selfcut.psp}}. Then any pair of segment endpoints lying closer than \code{eps} units apart, is treated as a single vertex. The linear network is then constructed using \code{\link{linnet}}. It would be wise to check the result by plotting the degree of each vertex, as shown in the Examples. If \code{X} has marks, then these are stored in the resulting linear network \code{Y <- as.linnet(X)}, and can be extracted as \code{marks(as.psp(Y))} or \code{marks(Y$lines)}. } \value{ A linear network (object of class \code{"linnet"}). 
The result also has an attribute \code{"camefrom"} indicating the provenance of each line in the resulting network. For example \code{camefrom[3]=2} means that the third line segment in the result is a piece of the second segment of \code{X}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{linnet}}, \code{\link{selfcut.psp}}, \code{\link{methods.linnet}}. } \examples{ # make some data A <- psp(0.09, 0.55, 0.79, 0.80, window=owin()) B <- superimpose(A, as.psp(simplenet)) # convert to a linear network L <- as.linnet(B) # check validity L plot(L) text(vertices(L), labels=vertexdegree(L)) # show the pieces that came from original segment number 1 S <- as.psp(L) (camefrom <- attr(L, "camefrom")) parts <- which(camefrom == 1) plot(S[parts], add=TRUE, col="green", lwd=2) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/Math.linim.Rd0000644000176200001440000000637014141460471016457 0ustar liggesusers\name{Math.linim} \alias{Math.linim} \alias{Ops.linim} \alias{Summary.linim} \alias{Complex.linim} \title{S3 Group Generic Methods for Images on a Linear Network} \description{ These are group generic methods for images of class \code{"linim"}, which allows for usual mathematical functions and operators to be applied directly to pixel images on a linear network. See Details for a list of implemented functions. } \usage{ ## S3 methods for group generics have prototypes: \special{Math(x, \dots)} \special{Ops(e1, e2)} \special{Complex(z)} \special{Summary(\dots, na.rm = FALSE)} %NAMESPACE S3method("Math", "linim") %NAMESPACE S3method("Ops", "linim") %NAMESPACE S3method("Complex", "linim") %NAMESPACE S3method("Summary", "linim") } \arguments{ \item{x, z, e1, e2}{objects of class \code{"linim"}.} \item{\dots}{further arguments passed to methods.} \item{na.rm}{logical: should missing values be removed?} } \details{ An object of class \code{"linim"} represents a pixel image on a linear network. See \code{\link{linim}}. Below is a list of mathematical functions and operators which are defined for these images. Not all functions will make sense for all types of images. For example, none of the functions in the \code{"Math"} group make sense for character-valued images. Note that the \code{"Ops"} group methods are implemented using \code{\link{eval.linim}}. \enumerate{ \item Group \code{"Math"}: \itemize{ \item \code{abs}, \code{sign}, \code{sqrt},\cr \code{floor}, \code{ceiling}, \code{trunc},\cr \code{round}, \code{signif} \item \code{exp}, \code{log}, \code{expm1}, \code{log1p},\cr \code{cos}, \code{sin}, \code{tan},\cr \code{cospi}, \code{sinpi}, \code{tanpi},\cr \code{acos}, \code{asin}, \code{atan} \code{cosh}, \code{sinh}, \code{tanh},\cr \code{acosh}, \code{asinh}, \code{atanh} \item \code{lgamma}, \code{gamma}, \code{digamma}, \code{trigamma} \item \code{cumsum}, \code{cumprod}, \code{cummax}, \code{cummin} } \item Group \code{"Ops"}: \itemize{ \item \code{"+"}, \code{"-"}, \code{"*"}, \code{"/"}, \code{"^"}, \code{"\%\%"}, \code{"\%/\%"} \item \code{"&"}, \code{"|"}, \code{"!"} \item \code{"=="}, \code{"!="}, \code{"<"}, \code{"<="}, \code{">="}, \code{">"} } \item Group \code{"Summary"}: \itemize{ \item \code{all}, \code{any} \item \code{sum}, \code{prod} \item \code{min}, \code{max} \item \code{range} } \item Group \code{"Complex"}: \itemize{ \item \code{Arg}, \code{Conj}, \code{Im}, \code{Mod}, \code{Re} } } } \seealso{ \code{\link{eval.linim}} for evaluating expressions involving images. 
} \value{ The return value is another object of class \code{"linim"}, except in the following cases: \code{all} and \code{any} return a single logical value; \code{sum}, \code{prod}, \code{min} and \code{max} return a single numerical value; \code{range} returns a vector of two numerical values. } \examples{ fx <- function(x,y,seg,tp) { (x - y)^2 } fL <- linfun(fx, simplenet) Z <- as.linim(fL) A <- Z+2 A <- -Z A <- sqrt(Z) A <- !(Z > 0.1) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} spatstat.linnet/man/pairdist.lpp.Rd0000644000176200001440000000364114141460471017066 0ustar liggesusers\name{pairdist.lpp} \alias{pairdist.lpp} \title{ Pairwise shortest-path distances between points on a linear network } \description{ Given a pattern of points on a linear network, compute the matrix of distances between all pairs of points, measuring distance by the shortest path in the network. } \usage{ \method{pairdist}{lpp}(X, ..., method="C") } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{method}{ Optional string determining the method of calculation. Either \code{"interpreted"} or \code{"C"}. } \item{\dots}{ Ignored. } } \details{ Given a pattern of points on a linear network, this function computes the matrix of distances between all pairs of points, measuring distance by the shortest path in the network. If two points cannot be joined by a path, the distance between them is infinite (\code{Inf}). The argument \code{method} is not normally used. It is retained only for developers to check the validity of the software. } \section{Algorithms and accuracy}{ Distances are accurate within the numerical tolerance of the network, \code{summary(X)$toler}. For network data stored in the non-sparse representation described in \code{\link{linnet}}, then pairwise distances are computed using the matrix of path distances between vertices of the network, using \R code if \code{method = "interpreted"}, or using C code if \code{method="C"} (the default). For networks stored in the sparse representation, the argument \code{method} has no effect, and the distances are computed using an efficient C algorithm. } \value{ A symmetric matrix, whose values are nonnegative numbers or infinity (\code{Inf}). } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian. } \seealso{ \code{\link{lpp}} } \examples{ X <- runiflpp(12, simplenet) d <- pairdist(X) d[1:3, 1:3] } \keyword{spatial} spatstat.linnet/man/identify.lpp.Rd0000644000176200001440000000354614141460471017066 0ustar liggesusers\name{identify.lpp} \alias{identify.lpp} \title{Identify Points in a Point Pattern on a Linear Network} \description{ If a point pattern on a network is plotted in the graphics window, this function will find the point of the pattern which is nearest to the mouse position, and print its mark value (or its serial number if there is no mark). } \usage{ \method{identify}{lpp}(x, \dots) } \arguments{ \item{x}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Arguments passed to \code{\link[graphics]{identify.default}}. } } \value{ If \code{x} is unmarked, the result is a vector containing the serial numbers of the points in the pattern \code{x} that were identified. If \code{x} is marked, the result is a 2-column matrix, the first column containing the serial numbers and the second containing the marks for these points. 
} \details{ This is a method for the generic function \code{\link[graphics]{identify}} for point patterns on a linear network (objects of class \code{"lpp"}). The point pattern \code{x} should first be plotted using \code{\link{plot.lpp}}. Then \code{identify(x)} reads the position of the graphics pointer each time the left mouse button is pressed. It then finds the point of the pattern \code{x} closest to the mouse position. If this closest point is sufficiently close to the mouse pointer, its index (and its mark if any) will be returned as part of the value of the call. Each time a point of the pattern is identified, text will be displayed next to the point, showing its serial number (if \code{x} is unmarked) or its mark value (if \code{x} is marked). } \seealso{ \code{\link[spatstat.geom]{identify.ppp}}, \code{\link[graphics]{identify}}, \code{\link{clicklpp}} } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{iplot} spatstat.linnet/man/linearpcf.Rd0000644000176200001440000000625514141460471016424 0ustar liggesusers\name{linearpcf} \alias{linearpcf} \title{ Linear Pair Correlation Function } \description{ Computes an estimate of the linear pair correlation function for a point pattern on a linear network. } \usage{ linearpcf(X, r=NULL, ..., correction="Ang", ratio=FALSE) } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{r}{ Optional. Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link{density.default}} to control the smoothing. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each estimate will also be saved, for use in analysing replicated point patterns. } } \details{ This command computes the linear pair correlation function from point pattern data on a linear network. The pair correlation function is estimated from the shortest-path distances between each pair of data points, using the fixed-bandwidth kernel smoother \code{\link{density.default}}, with a bias correction at each end of the interval of \eqn{r} values. To switch off the bias correction, set \code{endcorrect=FALSE}. The bandwidth for smoothing the pairwise distances is determined by arguments \code{\dots} passed to \code{\link{density.default}}, mainly the arguments \code{bw} and \code{adjust}. The default is to choose the bandwidth by Silverman's rule of thumb \code{bw="nrd0"} explained in \code{\link{density.default}}. If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. The result is an estimate of the first derivative of the network \eqn{K} function defined by Okabe and Yamada (2001). If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010). The result is an estimate of the pair correlation function in the linear network. } \value{ Function value table (object of class \code{"fv"}). If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{g(r)}. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian. } \references{ Ang, Q.W. (2010) Statistical methodology for spatial point patterns on a linear network. MSc thesis, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. 
(2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. Okabe, A. and Yamada, I. (2001) The K-function method on a network and its computational implementation. \emph{Geographical Analysis} \bold{33}, 271-290. } \seealso{ \code{\link{linearK}}, \code{\link{linearpcfinhom}}, \code{\link{lpp}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) linearpcf(X) linearpcf(X, correction="none") } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/treebranchlabels.Rd0000644000176200001440000000442114141460471017752 0ustar liggesusers\name{treebranchlabels} \alias{treebranchlabels} \title{ Label Vertices of a Tree by Branch Membership } \description{ Given a linear network which is a tree (acyclic graph), this function assigns a label to each vertex, indicating its position in the tree. } \usage{ treebranchlabels(L, root = 1) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). The network must have no loops. } \item{root}{ Root of the tree. An integer index identifying which point in \code{vertices(L)} is the root of the tree. } } \details{ The network \code{L} should be a tree, that is, it must have no loops. This function computes a character string label for each vertex of the network \code{L}. The vertex identified by \code{root} (that is, \code{vertices(L)[root]}) is taken as the root of the tree and is given the empty label \code{""}. \itemize{ \item If there are several line segments which meet at the root vertex, each of these segments is the start of a new branch of the tree; the other endpoints of these segments are assigned the labels \code{"a"}, \code{"b"}, \code{"c"} and so on. \item If only one segment issues from the root vertex, the other endpoint of this segment is assigned the empty label \code{""}. } A similar rule is then applied to each of the newly-labelled vertices. If the vertex labelled \code{"a"} is joined to two other unlabelled vertices, these will be labelled \code{"aa"} and \code{"ab"}. The rule is applied recursively until all vertices have been labelled. If \code{L} is not a tree, the algorithm will terminate, but the results will be nonsense. } \value{ A vector of character strings, with one entry for each point in \code{vertices(L)}. } \author{ \spatstatAuthors } \seealso{ \code{\link{deletebranch}}, \code{\link{extractbranch}}, \code{\link{treeprune}} for manipulating a network using the branch labels. \code{\link{linnet}} for creating a network. } \examples{ # make a simple tree m <- simplenet$m m[8,10] <- m[10,8] <- FALSE L <- linnet(vertices(simplenet), m) plot(L, main="") # compute branch labels tb <- treebranchlabels(L, 1) tbc <- paste0("[", tb, "]") text(vertices(L), labels=tbc, cex=2) } \keyword{spatial} \keyword{math} spatstat.linnet/man/auc.lpp.Rd0000644000176200001440000000735614141460471016026 0ustar liggesusers\name{auc.lpp} \alias{auc.lpp} \alias{auc.lppm} \title{ Area Under ROC Curve for Data on a Network } \description{ Compute the AUC (area under the Receiver Operating Characteristic curve) for a fitted point process model on a linear network. } \usage{ \method{auc}{lpp}(X, covariate, \dots, high = TRUE) \method{auc}{lppm}(X, \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"} or \code{"lpp"}) or fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"}). } \item{covariate}{ Spatial covariate. 
Either a \code{function(x,y)}, a pixel image (object of class \code{"im"} or \code{"linim"}), or one of the strings \code{"x"} or \code{"y"} indicating the Cartesian coordinates. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution for calculations. } \item{high}{ Logical value indicating whether the threshold operation should favour high or low values of the covariate. } } \details{ This command computes the AUC, the area under the Receiver Operating Characteristic curve. The ROC itself is computed by \code{\link{roc}}. The function \code{\link[spatstat.core]{auc}} is generic, with methods for \code{"ppp"} and \code{"ppm"} described in the help file for \code{\link[spatstat.core]{auc}}. This help file describes the methods for classes \code{"lpp"} and \code{"lppm"}. For a point pattern \code{X} and a covariate \code{Z}, the AUC is a numerical index that measures the ability of the covariate to separate the spatial domain into areas of high and low density of points. Let \eqn{x_i}{x[i]} be a randomly-chosen data point from \code{X} and \eqn{U} a randomly-selected location in the study region. The AUC is the probability that \eqn{Z(x_i) > Z(U)}{Z(x[i]) > Z(U)} assuming \code{high=TRUE}. That is, AUC is the probability that a randomly-selected data point has a higher value of the covariate \code{Z} than does a randomly-selected spatial location. The AUC is a number between 0 and 1. A value of 0.5 indicates a complete lack of discriminatory power. For a fitted point process model \code{X}, the AUC measures the ability of the fitted model intensity to separate the spatial domain into areas of high and low density of points. Suppose \eqn{\lambda(u)}{\lambda(u)} is the intensity function of the model. The AUC is the probability that \eqn{\lambda(x_i) > \lambda(U)}{\lambda(x[i]) > \lambda(U)}. That is, AUC is the probability that a randomly-selected data point has higher predicted intensity than does a randomly-selected spatial location. The AUC is \bold{not} a measure of the goodness-of-fit of the model (Lobo et al, 2007). } \value{ Numeric. For \code{auc.lpp}, the result is a single number giving the AUC value. For \code{auc.lppm}, the result is a numeric vector of length 2 giving the AUC value and the theoretically expected AUC value for this model. } \references{ Lobo, J.M., \ifelse{latex}{\out{Jim{\'e}nez}}{Jimenez}-Valverde, A. and Real, R. (2007) AUC: a misleading measure of the performance of predictive distribution models. \emph{Global Ecology and Biogeography} \bold{17}(2) 145--151. Nam, B.-H. and D'Agostino, R. (2002) Discrimination index, the area under the {ROC} curve. Pages 267--279 in Huber-Carol, C., Balakrishnan, N., Nikulin, M.S. and Mesbah, M., \emph{Goodness-of-fit tests and model validity}, \ifelse{latex}{\out{Birkh{\"a}user}}{Birkhauser}, Basel. } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.core]{auc}}, \code{\link[spatstat.core]{roc}}, \code{\link{roc.lpp}} } \examples{ auc(spiders, "x") fit <- lppm(spiders ~ x + y) auc(fit) } \keyword{spatial} spatstat.linnet/man/envelope.lpp.Rd0000644000176200001440000002567214141460471017074 0ustar liggesusers\name{envelope.lpp} \alias{envelope.lpp} \alias{envelope.lppm} \title{ Envelope for Point Patterns on Linear Network } \description{ Enables envelopes to be computed for point patterns on a linear network. 
} \usage{ \method{envelope}{lpp}(Y, fun=linearK, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, transform=NULL,global=FALSE,ginterval=NULL,use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) \method{envelope}{lppm}(Y, fun=linearK, nsim=99, nrank=1, \dots, funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, transform=NULL,global=FALSE,ginterval=NULL,use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) } \arguments{ \item{Y}{ A point pattern on a linear network (object of class \code{"lpp"}) or a fitted point process model on a linear network (object of class \code{"lppm"}). } \item{fun}{ Function that is to be computed for each simulated pattern. } \item{nsim}{ Number of simulations to perform. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{\dots}{ Extra arguments passed to \code{fun}. } \item{funargs}{ A list, containing extra arguments to be passed to \code{fun}. } \item{funYargs}{ Optional. A list, containing extra arguments to be passed to \code{fun} when applied to the original data \code{Y} only. } \item{simulate}{ Optional. Specifies how to generate the simulated point patterns. If \code{simulate} is an expression in the R language, then this expression will be evaluated \code{nsim} times, to obtain \code{nsim} point patterns which are taken as the simulated patterns from which the envelopes are computed. If \code{simulate} is a function, then this function will be repeatedly applied to the data pattern \code{Y} to obtain \code{nsim} simulated patterns. If \code{simulate} is a list of point patterns, then the entries in this list will be treated as the simulated patterns from which the envelopes are computed. Alternatively \code{simulate} may be an object produced by the \code{envelope} command: see Details. } \item{fix.n}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points as the original data pattern. } \item{fix.marks}{ Logical. If \code{TRUE}, simulated patterns will have the same number of points \emph{and} the same marks as the original data pattern. In a multitype point pattern this means that the simulated patterns will have the same number of points \emph{of each type} as the original data. } \item{verbose}{ Logical flag indicating whether to print progress reports during the simulations. } \item{transform}{ Optional. A transformation to be applied to the function values, before the envelopes are computed. An expression object (see Details). } \item{global}{ Logical flag indicating whether envelopes should be pointwise (\code{global=FALSE}) or simultaneous (\code{global=TRUE}). } \item{ginterval}{ Optional. A vector of length 2 specifying the interval of \eqn{r} values for the simultaneous critical envelopes. Only relevant if \code{global=TRUE}. } \item{use.theory}{ Logical value indicating whether to use the theoretical value, computed by \code{fun}, as the reference value for simultaneous envelopes. 
Applicable only when \code{global=TRUE}. } \item{alternative}{ Character string determining whether the envelope corresponds to a two-sided test (\code{side="two.sided"}, the default) or a one-sided test with a lower critical boundary (\code{side="less"}) or a one-sided test with an upper critical boundary (\code{side="greater"}). } \item{scale}{ Optional. Scaling function for global envelopes. A function in the \R language which determines the relative scale of deviations, as a function of distance \eqn{r}, when computing the global envelopes. Applicable only when \code{global=TRUE}. Summary function values for distance \code{r} will be \emph{divided} by \code{scale(r)} before the maximum deviation is computed. The resulting global envelopes will have width proportional to \code{scale(r)}. } \item{clamp}{ Logical value indicating how to compute envelopes when \code{alternative="less"} or \code{alternative="greater"}. Deviations of the observed summary function from the theoretical summary function are initially evaluated as signed real numbers, with large positive values indicating consistency with the alternative hypothesis. If \code{clamp=FALSE} (the default), these values are not changed. If \code{clamp=TRUE}, any negative values are replaced by zero. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } \item{nsim2}{ Number of extra simulated point patterns to be generated if it is necessary to use simulation to estimate the theoretical mean of the summary function. Only relevant when \code{global=TRUE} and the simulations are not based on CSR. } \item{VARIANCE}{ Logical. If \code{TRUE}, critical envelopes will be calculated as sample mean plus or minus \code{nSD} times sample standard deviation. } \item{nSD}{ Number of estimated standard deviations used to determine the critical envelopes, if \code{VARIANCE=TRUE}. } \item{Yname}{ Character string that should be used as the name of the data point pattern \code{Y} when printing or plotting the results. } \item{maxnerr}{ Maximum number of rejected patterns. If \code{fun} yields a fatal error when applied to a simulated point pattern (for example, because the pattern is empty and \code{fun} requires at least one point), the pattern will be rejected and a new random point pattern will be generated. If this happens more than \code{maxnerr} times, the algorithm will give up. } \item{rejectNA}{ Logical value specifying whether to reject a simulated pattern if the resulting values of \code{fun} are all equal to \code{NA}, \code{NaN} or infinite. If \code{FALSE} (the default), then simulated patterns are rejected only when \code{fun} gives a fatal error. } \item{silent}{ Logical value specifying whether to print a report each time a simulated pattern is rejected. } \item{do.pwrong}{ Logical. If \code{TRUE}, the algorithm will also estimate the true significance level of the \dQuote{wrong} test (the test that declares the summary function for the data to be significant if it lies outside the \emph{pointwise} critical boundary at any point). This estimate is printed when the result is printed. } \item{envir.simul}{ Environment in which to evaluate the expression \code{simulate}, if not the current environment. } } \details{ This is a method for the generic function \code{\link{envelope}} applicable to point patterns on a linear network. 
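For instance, a pointwise envelope based on the default summary function \code{\link{linearK}} could be computed as in this small sketch (a small number of simulations is used only to keep it quick):
\preformatted{
  X <- runiflpp(30, simplenet)
  E <- envelope(X, linearK, nsim=19)
  plot(E)
}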
The argument \code{Y} can be either a point pattern on a linear network, or a fitted point process model on a linear network. The function \code{fun} will be evaluated for the data and also for \code{nsim} simulated point patterns on the same linear network. The upper and lower envelopes of these evaluated functions will be computed as described in \code{\link{envelope}}. The type of simulation is determined as follows. \itemize{ \item if \code{Y} is a point pattern (object of class \code{"lpp"}) and \code{simulate} is missing or \code{NULL}, then random point patterns will be generated according to a Poisson point process on the linear network on which \code{Y} is defined, with intensity estimated from \code{Y}. \item if \code{Y} is a fitted point process model (object of class \code{"lppm"}) and \code{simulate} is missing or \code{NULL}, then random point patterns will be generated by simulating from the fitted model. \item If \code{simulate} is present, it specifies the type of simulation as explained below. \item If \code{simulate} is an expression (typically including a call to a random generator), then the expression will be repeatedly evaluated, and should yield random point patterns on the same linear network as \code{Y}. \item If \code{simulate} is a function (typically including a call to a random generator), then the function will be repeatedly applied to the original point pattern \code{Y}, and should yield random point patterns on the same linear network as \code{Y}. \item If \code{simulate} is a list of point patterns, then these will be taken as the simulated point patterns. They should be on the same linear network as \code{Y}. } The function \code{fun} should accept as its first argument a point pattern on a linear network (object of class \code{"lpp"}) and should have another argument called \code{r} or a \code{\dots} argument. } \value{ Function value table (object of class \code{"fv"}) with additional information, as described in \code{\link{envelope}}. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link{envelope}}, \code{\link{linearK}} } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. Okabe, A. and Yamada, I. (2001) The K-function method on a network and its computational implementation. \emph{Geographical Analysis} \bold{33}, 271-290. 
} \examples{ if(interactive()) { ns <- 39 np <- 40 } else { ns <- np <- 3 } X <- runiflpp(np, simplenet) # uniform Poisson: random numbers of points envelope(X, nsim=ns) # uniform Poisson: conditional on observed number of points envelope(X, fix.n=TRUE, nsim=ns) # nonuniform Poisson fit <- lppm(X ~x) envelope(fit, nsim=ns) #multitype marks(X) <- sample(letters[1:2], np, replace=TRUE) envelope(X, nsim=ns) } \keyword{spatial} spatstat.linnet/man/linearKdot.inhom.Rd0000644000176200001440000001040014141460471017651 0ustar liggesusers\name{linearKdot.inhom} \alias{linearKdot.inhom} \title{ Inhomogeneous multitype K Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the inhomogeneous multitype \eqn{K} function which counts the expected number of points (of any type) within a given distance of a point of type \eqn{i}. } \usage{ linearKdot.inhom(X, i, lambdaI, lambdadot, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the dot type \eqn{K} function \eqn{K_{i\bullet}(r)}{K[i.](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdadot}{ Intensity values for all points of \code{X}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{i\bullet}(r)}{K[i.](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{lambdaI} and \code{lambdadot} if they are functions. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{Kdot.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. 
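The intensity arguments \code{lambdaI} and \code{lambdadot} may also be supplied as pixel images on the network; a rough sketch (not taken from the package examples, using kernel intensity estimates with an arbitrarily chosen bandwidth of 100 units) is:
\preformatted{
  lamI   <- density(split(chicago)$assault, 100)
  lamdot <- density(unmark(chicago), 100)
  Kda <- linearKdot.inhom(chicago, "assault", lamI, lamdot)
}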
If \code{lambdaI} or \code{lambdadot} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearKdot}}, \code{\link{linearK}}. } \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lam. <- function(x,y,const=sum(lam)){ rep(const, length(x)) } K <- linearKdot.inhom(chicago, "assault", lamI, lam.) # using fitted models for the intensity # fit <- lppm(chicago ~marks + x) # linearKdot.inhom(chicago, "assault", fit, fit) } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/lineartileindex.Rd0000644000176200001440000000421214141460471017630 0ustar liggesusers\name{lineartileindex} \alias{lineartileindex} \title{ Determine Which Tile Contains Each Given Point on a Linear Network } \description{ Given a tessellation on a linear network, and a list of points on the network, determine which tile of the tessellation contains each of the given points. } \usage{ lineartileindex(seg, tp, Z, method = c("encode", "C", "interpreted")) } \arguments{ \item{seg,tp}{ Vectors of local coordinates of the query points. See Details. } \item{Z}{ A tessellation on a linear network (object of class \code{"lintess"}). } \item{method}{ Internal use only. } } \details{ This low-level function is the analogue of \code{\link{tileindex}} for linear networks. For a tessellation \code{Z} on a linear network, and a list of query points on the same network, the function determines which tile of the tessellation contains each query point. Argument \code{Z} should be a tessellation on a linear network (object of class \code{"lintess"}). The vectors \code{seg} and \code{tp} specify the locations of the query points, on the same network, using local coordinates: \code{seg} contains integer values specifying which segment of the network contains each query point; \code{tp} contains numeric values between 0 and 1 specifying the fractional position along that segment. The result is a factor, of the same length as \code{seg} and \code{tp}, indicating which tile contains each point. The levels of the factor are the names of the tiles of \code{Z}. } \value{ A factor, of the same length as \code{seg} and \code{tp}, whose levels are the names of the tiles of \code{Z}. } \author{ \spatstatAuthors } \seealso{ \code{\link{lintess}}. \code{\link{as.linfun.lintess}} to create a function whose value is the tile index. \code{\link{cut.lpp}} for a neater way to classify the points of a point pattern on a linear network according to a tessellation on the network. 
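For instance, \code{\link{as.linfun.lintess}} can be used to build a function returning the containing tile, as in this small sketch:
\preformatted{
  Z <- lineardirichlet(runiflpp(15, simplenet))
  X <- runiflpp(10, simplenet)
  g <- as.linfun(Z, values=NULL)  # returns the tile label for each query point
  g(X)
}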
} \examples{ Z <- lineardirichlet(runiflpp(15, simplenet)) X <- runiflpp(10, simplenet) coX <- coords(X) ii <- lineartileindex(coX$seg, coX$tp, Z) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/rpoislpp.Rd0000644000176200001440000000345514141460471016330 0ustar liggesusers\name{rpoislpp} \alias{rpoislpp} \title{ Poisson Point Process on a Linear Network } \description{ Generates a realisation of the Poisson point process with specified intensity on the given linear network. } \usage{ rpoislpp(lambda, L, \dots, nsim=1, drop=TRUE) } \arguments{ \item{lambda}{ Intensity of the Poisson process. A single number, a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or a vector of numbers, a list of functions, or a list of images. } \item{L}{ A linear network (object of class \code{"linnet"}, see \code{\link{linnet}}). Can be omitted in some cases: see Details. } \item{\dots}{ Arguments passed to \code{\link{rpoisppOnLines}}. } \item{nsim}{Number of simulated realisations to generate.} \item{drop}{ Logical value indicating what to do when \code{nsim=1}. If \code{drop=TRUE} (the default), the result is a point pattern. If \code{drop=FALSE}, the result is a list with one entry which is a point pattern. } } \details{ This function uses \code{\link{rpoisppOnLines}} to generate the random points. Argument \code{L} can be omitted, and defaults to \code{as.linnet(lambda)}, when \code{lambda} is a function on a linear network (class \code{"linfun"}) or a pixel image on a linear network (\code{"linim"}). } \value{ If \code{nsim = 1} and \code{drop=TRUE}, a point pattern on the linear network, i.e.\ an object of class \code{"lpp"}. Otherwise, a list of such point patterns. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link{runiflpp}}, \code{\link{rlpp}}, \code{\link{lpp}}, \code{\link{linnet}} } \examples{ X <- rpoislpp(5, simplenet) plot(X) # multitype X <- rpoislpp(c(a=5, b=5), simplenet) } \keyword{spatial} \keyword{datagen} spatstat.linnet/man/macros/0000755000176200001440000000000014141460471015446 5ustar liggesusersspatstat.linnet/man/macros/defns.Rd0000644000176200001440000000432014141460471017033 0ustar liggesusers%% macro definitions for spatstat man pages \newcommand{\adrian}{Adrian Baddeley \email{Adrian.Baddeley@curtin.edu.au}} \newcommand{\rolf}{Rolf Turner \email{r.turner@auckland.ac.nz}} \newcommand{\ege}{Ege Rubak \email{rubak@math.aau.dk}} \newcommand{\spatstatAuthors}{\adrian, \rolf and \ege} % Names with accents \newcommand{\Bogsted}{\ifelse{latex}{\out{B\o gsted}}{Bogsted}} \newcommand{\Cramer}{\ifelse{latex}{\out{Cram\'er}}{Cramer}} \newcommand{\Hogmander}{\ifelse{latex}{\out{H{\"o}gmander}}{Hogmander}} \newcommand{\Jyvaskyla}{\ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}} \newcommand{\Matern}{\ifelse{latex}{\out{Mat\'ern}}{Matern}} \newcommand{\Moller}{\ifelse{latex}{\out{M\o ller}}{Moller}} \newcommand{\Oehlschlaegel}{\ifelse{latex}{\out{Oehlschl\"{a}gel}}{Oehlschlaegel}} \newcommand{\Prokesova}{\ifelse{latex}{\out{Proke\u{s}ov{\'{a}}}}{Prokesova}} \newcommand{\Sarkka}{\ifelse{latex}{\out{S\"{a}rkk\"{a}}}{Sarkka}} %% List of all Gibbs interactions \newcommand{\GibbsInteractionsList}{\code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{Concom}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{HierHard}}, \code{\link{HierStrauss}}, \code{\link{HierStraussHard}}, \code{\link{Hybrid}}, \code{\link{LennardJones}}, 
\code{\link{MultiHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{OrdThresh}}, \code{\link{Ord}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Penttinen}}, \code{\link{Poisson}}, \code{\link{Saturated}}, \code{\link{SatPiece}}, \code{\link{Softcore}}, \code{\link{Strauss}}, \code{\link{StraussHard}} and \code{\link{Triplets}}} %% List of interactions recognised by RMH code \newcommand{\rmhInteractionsList}{\code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{Hybrid}}, \code{\link{LennardJones}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Penttinen}}, \code{\link{Poisson}}, \code{\link{Softcore}}, \code{\link{Strauss}}, \code{\link{StraussHard}} and \code{\link{Triplets}}} spatstat.linnet/man/methods.linim.Rd0000644000176200001440000000600614141460471017225 0ustar liggesusers\name{methods.linim} \Rdversion{1.1} \alias{methods.linim} %DoNotExport \alias{as.im.linim} \alias{as.data.frame.linim} \alias{print.linim} \alias{summary.linim} \alias{affine.linim} \alias{scalardilate.linim} \alias{shift.linim} \title{ Methods for Images on a Linear Network } \description{ Methods for the class \code{"linim"} of pixel images on a linear network. } \usage{ \method{print}{linim}(x, \dots) \method{summary}{linim}(object, \dots) \method{as.im}{linim}(X, \dots) \method{as.data.frame}{linim}(x, \dots) \method{shift}{linim}(X, \dots) \method{scalardilate}{linim}(X, f, \dots, origin=NULL) \method{affine}{linim}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) } \arguments{ \item{X,x,object}{ A pixel image on a linear network (object of class \code{"linim"}). } \item{\dots}{ Extra arguments passed to other methods. } \item{f}{Numeric. Scalar dilation factor.} \item{mat}{Numeric matrix representing the linear transformation.} \item{vec}{Numeric vector of length 2 specifying the shift vector.} \item{origin}{Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{summary}} and \code{\link{as.data.frame}}, and the \pkg{spatstat} generic functions \code{\link{as.im}}, \code{\link{shift}}, \code{\link{scalardilate}} and \code{\link{affine}}. An object of class \code{"linim"} represents a pixel image defined on a linear network. The method \code{as.im.linim} extracts the pixel values and returns a pixel image of class \code{"im"}. The method \code{as.data.frame.linim} returns a data frame giving spatial locations (in Cartesian and network coordinates) and corresponding function values. The methods \code{shift.linim}, \code{scalardilate.linim} and \code{affine.linim} apply geometric transformations to the pixels and the underlying linear network, without changing the pixel values. } \value{ For \code{print.linim} the result is \code{NULL}. The function \code{summary.linim} returns an object of class \code{"summary.linim"}. In normal usage this summary is automatically printed by \code{\link{print.summary.linim}}. For \code{as.im.linim} the result is an object of class \code{"im"}. For the geometric transformations \code{shift.linim}, \code{scalardilate.linim} and \code{affine.linim}, the result is another object of class \code{"linim"}.
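For instance, a rotation could be applied as in the following small sketch (the rotation angle is arbitrary, purely for illustration):
\preformatted{
  M <- as.mask.psp(as.psp(simplenet))
  X <- linim(simplenet, as.im(function(x,y) {x - y}, W=M))
  theta <- pi/6
  R <- matrix(c(cos(theta), sin(theta), -sin(theta), cos(theta)), 2, 2)
  Y <- affine(X, mat=R)  # image and network are rotated together
}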
} \examples{ M <- as.mask.psp(as.psp(simplenet)) Z <- as.im(function(x,y) {x-y}, W=M) X <- linim(simplenet, Z) ## ............ print basic details ......................... X ## ............ print gory details ......................... summary(X) ## ........................................................... shift(X, c(1,1)) scalardilate(X, 2) head(as.data.frame(X)) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat.linnet/man/repairNetwork.Rd0000644000176200001440000000170414141460471017307 0ustar liggesusers\name{repairNetwork} \alias{repairNetwork} \title{ Repair Internal Data in a Linear Network } \description{ Detect and repair inconsistencies or duplication in the internal data of a network object. } \usage{ repairNetwork(X) } \arguments{ \item{X}{ A linear network (object of class \code{"linnet"}) or a point pattern on a linear network (object of class \code{"lpp"}). } } \details{ This function detects and repairs inconsistencies in the internal data of \code{X}. Currently it does the following: \itemize{ \item checks that different ways of calculating the number of edges give the same answer \item removes any duplicated edges of the network \item ensures that each edge is recorded as a pair of vertex indices \code{(from, to)} with \code{from < to}. } } \value{ An object of the same kind as \code{X}. } \author{ \adrian. } \seealso{ \code{\link{thinNetwork}} } \keyword{spatial} \keyword{manip} spatstat.linnet/man/deletebranch.Rd0000644000176200001440000000463714141460471017103 0ustar liggesusers\name{deletebranch} \alias{deletebranch} \alias{deletebranch.linnet} \alias{deletebranch.lpp} \alias{extractbranch} \alias{extractbranch.linnet} \alias{extractbranch.lpp} \title{ Delete or Extract a Branch of a Tree } \description{ Deletes or extracts a given branch of a tree. } \usage{ deletebranch(X, \dots) \method{deletebranch}{linnet}(X, code, labels, \dots) \method{deletebranch}{lpp}(X, code, labels, \dots) extractbranch(X, \dots) \method{extractbranch}{linnet}(X, code, labels, \dots, which=NULL) \method{extractbranch}{lpp}(X, code, labels, \dots, which=NULL) } \arguments{ \item{X}{ Linear network (object of class \code{"linnet"}) or point pattern on a linear network (object of class \code{"lpp"}). } \item{code}{ Character string. Label of the branch to be deleted or extracted. } \item{labels}{ Vector of character strings. Branch labels for the vertices of the network, usually obtained from \code{\link{treebranchlabels}}. } \item{\dots}{Arguments passed to methods.} \item{which}{ Logical vector indicating which vertices of the network should be extracted. Overrides \code{code} and \code{labels}. } } \details{ The linear network \code{L <- X} or \code{L <- as.linnet(X)} must be a tree, that is, it has no loops. The argument \code{labels} should be a character vector giving tree branch labels for each vertex of the network. It is usually obtained by calling \code{\link{treebranchlabels}}. The branch designated by the string \code{code} will be deleted or extracted. The return value is the result of deleting or extracting this branch from \code{X} along with any data associated with this branch (such as points or marks). } \value{ Another object of the same type as \code{X} obtained by deleting or extracting the specified branch. 
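For instance, the following small sketch (adapted from the Examples below) illustrates that, for a point pattern, the points lying on the deleted branch are removed along with it:
\preformatted{
  m <- simplenet$m
  m[8,10] <- m[10,8] <- FALSE
  L <- linnet(vertices(simplenet), m)
  tb <- treebranchlabels(L, 1)
  X <- runiflpp(20, L)
  XminusB <- deletebranch(X, "b", tb)  # points on branch "b" are removed too
}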
} \author{ \spatstatAuthors } \seealso{ \code{\link{treebranchlabels}}, \code{\link{branchlabelfun}}, \code{\link{linnet}} } \examples{ # make a simple tree m <- simplenet$m m[8,10] <- m[10,8] <- FALSE L <- linnet(vertices(simplenet), m) plot(L, main="") # compute branch labels tb <- treebranchlabels(L, 1) tbc <- paste0("[", tb, "]") text(vertices(L), labels=tbc, cex=2) # delete branch B LminusB <- deletebranch(L, "b", tb) plot(LminusB, add=TRUE, col="green") # extract branch B LB <- extractbranch(L, "b", tb) plot(LB, add=TRUE, col="red") } \keyword{spatial} \keyword{manip} spatstat.linnet/man/roc.lpp.Rd0000644000176200001440000000605714141460471016036 0ustar liggesusers\name{roc.lpp} \alias{roc.lpp} \alias{roc.lppm} \title{ Receiver Operating Characteristic for Data on a Network } \description{ Computes the Receiver Operating Characteristic curve for a point pattern or a fitted point process model on a linear network. } \usage{ \method{roc}{lpp}(X, covariate, \dots, high = TRUE) \method{roc}{lppm}(X, \dots) } \arguments{ \item{X}{ Point pattern on a network (object of class \code{"lpp"}) or fitted point process model on a network (object of class \code{"lppm"}). } \item{covariate}{ Spatial covariate. Either a \code{function(x,y)}, a pixel image (object of class \code{"im"} or \code{"linim"}), or one of the strings \code{"x"} or \code{"y"} indicating the Cartesian coordinates. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} controlling the pixel resolution for calculations. } \item{high}{ Logical value indicating whether the threshold operation should favour high or low values of the covariate. } } \details{ The command \code{roc} computes the Receiver Operating Characteristic curve. The area under the ROC is computed by \code{\link[spatstat.core]{auc}}. The function \code{\link[spatstat.core]{roc}} is generic, with methods for \code{"ppp"} and \code{"ppm"} described in the help file for \code{\link[spatstat.core]{roc}}. This help file describes the methods for classes \code{"lpp"} and \code{"lppm"}. For a point pattern \code{X} and a covariate \code{Z}, the ROC is a plot showing the ability of the covariate to separate the spatial domain into areas of high and low density of points. For each possible threshold \eqn{z}, the algorithm calculates the fraction \eqn{a(z)} of area in the study region where the covariate takes a value greater than \eqn{z}, and the fraction \eqn{b(z)} of data points for which the covariate value is greater than \eqn{z}. The ROC is a plot of \eqn{b(z)} against \eqn{a(z)} for all thresholds \eqn{z}. For a fitted point process model, the ROC shows the ability of the fitted model intensity to separate the spatial domain into areas of high and low density of points. The ROC is \bold{not} a diagnostic for the goodness-of-fit of the model (Lobo et al, 2007). } \value{ Function value table (object of class \code{"fv"}) which can be plotted to show the ROC curve. } \references{ Lobo, J.M., \ifelse{latex}{\out{Jim{\'e}nez}}{Jimenez}-Valverde, A. and Real, R. (2007) AUC: a misleading measure of the performance of predictive distribution models. \emph{Global Ecology and Biogeography} \bold{17}(2) 145--151. Nam, B.-H. and D'Agostino, R. (2002) Discrimination index, the area under the {ROC} curve. Pages 267--279 in Huber-Carol, C., Balakrishnan, N., Nikulin, M.S. and Mesbah, M., \emph{Goodness-of-fit tests and model validity}, \ifelse{latex}{\out{Birkh{\"a}user}}{Birkhauser}, Basel. } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{auc.lpp}} } \examples{ plot(roc(spiders, "x")) fit <- lppm(spiders ~ x) plot(roc(fit)) } \keyword{spatial} spatstat.linnet/man/lineardirichlet.Rd0000644000176200001440000000257614141460471017625 0ustar liggesusers\name{lineardirichlet} \alias{lineardirichlet} \title{ Dirichlet Tessellation on a Linear Network } \description{ Given a point pattern on a linear network, compute the Dirichlet (or Voronoi or Thiessen) tessellation induced by the points. } \usage{ lineardirichlet(X) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } } \details{ The Dirichlet tessellation induced by a point pattern \code{X} on a linear network \code{L} is a partition of \code{L} into subsets. The subset \code{L[i]} associated with the data point \code{X[i]} is the part of \code{L} lying closer to \code{X[i]} than to any other data point \code{X[j]}, where distance is measured by the shortest path. } \section{Missing tiles}{ If the linear network is not connected, and if one of the connected components contains no data points, then the Dirichlet tessellation is mathematically undefined inside this component. The resulting tessellation object includes a tile with label \code{NA}, which contains this component of the network. A plot of the tessellation will not show this tile. } \value{ A tessellation on a linear network (object of class \code{"lintess"}). } \author{ \adrian. } \seealso{ \code{\link{lintess}} } \examples{ X <- runiflpp(5, simplenet) plot(lineardirichlet(X), lwd=3) points(X) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/is.connected.linnet.Rd0000644000176200001440000000223214141460471020314 0ustar liggesusers\name{is.connected.linnet} \Rdversion{1.1} \alias{is.connected.linnet} \title{ Determine Whether a Linear Network is Connected } \description{ Determine whether a linear network is topologically connected. } \usage{ \method{is.connected}{linnet}(X, \dots) } \arguments{ \item{X}{ A linear network (object of class \code{"linnet"}). } \item{\dots}{ Arguments passed to \code{\link{connected.linnet}} to determine the connected components. } } \details{ The command \code{is.connected(X)} returns \code{TRUE} if the network \code{X} consists of a single, topologically-connected piece, and returns \code{FALSE} if \code{X} consists of several pieces which are not joined together. The function \code{\link[spatstat.geom]{is.connected}} is generic, with methods for several classes. This help file documents the method for linear networks, \code{is.connected.linnet}. } \value{ A logical value. } \seealso{ \code{\link[spatstat.geom]{is.connected}}, \code{\link[spatstat.geom]{connected}}, \code{\link{connected.lpp}}. } \examples{ is.connected(simplenet) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{math} spatstat.linnet/man/clicklpp.Rd0000644000176200001440000000550714141460471016261 0ustar liggesusers\name{clicklpp} \alias{clicklpp} \title{Interactively Add Points on a Linear Network} \description{ Allows the user to create a point pattern on a linear network by point-and-click in the display. } \usage{ clicklpp(L, n=NULL, types=NULL, \dots, add=FALSE, main=NULL, hook=NULL) } \arguments{ \item{L}{ Linear network on which the points will be placed. An object of class \code{"linnet"}. } \item{n}{ Number of points to be added (if this is predetermined). } \item{types}{ Vector of types, when creating a multitype point pattern. 
} \item{\dots}{ Optional extra arguments to be passed to \code{\link[graphics]{locator}} to control the display. } \item{add}{ Logical value indicating whether to create a new plot (\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}). } \item{main}{ Main heading for plot. } \item{hook}{For internal use only. Do not use this argument.} } \value{ A point pattern (object of class \code{"lpp"}). } \details{ This function allows the user to create a point pattern on a linear network by interactively clicking on the screen display. First the linear network \code{L} is plotted on the current screen device. Then the user is prompted to point the mouse at any desired locations and click the left mouse button to add each point. Interactive input stops after \code{n} clicks (if \code{n} was given) or when the middle mouse button is pressed. The return value is a point pattern on the network \code{L}, containing the locations of all the clicked points, after they have been projected onto the network \code{L}. Any points that were clicked outside the bounding window of the network will be ignored. If the argument \code{types} is given, then a multitype point pattern will be created. The user is prompted to input the locations of points of type \code{type[i]}, for each successive index \code{i}. (If the argument \code{n} was given, there will be \code{n} points of \emph{each} type.) The return value is a multitype point pattern on a linear network. This function uses the \R{} command \code{\link[graphics]{locator}} to input the mouse clicks. It only works on screen devices such as \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. Arguments that can be passed to \code{\link[graphics]{locator}} through \code{\dots} include \code{pch} (plotting character), \code{cex} (character expansion factor) and \code{col} (colour). See \code{\link[graphics]{locator}} and \code{\link[graphics]{par}}. } \seealso{ \code{\link{clickppp}}, \code{\link{identify.lpp}}, \code{\link[graphics]{locator}}, \code{\link{clickpoly}}, \code{\link{clickbox}}, \code{\link{clickdist}} } \author{ \spatstatAuthors, based on an idea by Dominic Schuhmacher. } \keyword{spatial} \keyword{iplot} spatstat.linnet/man/as.linfun.Rd0000644000176200001440000000423514141460471016352 0ustar liggesusers\name{as.linfun} \alias{as.linfun} \alias{as.linfun.linim} \alias{as.linfun.lintess} \title{ Convert Data to a Function on a Linear Network } \description{ Convert some kind of data to an object of class \code{"linfun"} representing a function on a linear network. } \usage{ as.linfun(X, \dots) \method{as.linfun}{linim}(X, \dots) \method{as.linfun}{lintess}(X, \dots, values=marks(X), navalue=NA) } \arguments{ \item{X}{ Some kind of data to be converted. } \item{\dots}{ Other arguments passed to methods. } \item{values}{ Optional. Vector of function values, one entry associated with each tile of the tessellation. } \item{navalue}{ Optional. Function value associated with locations that do not belong to a tile of the tessellation. } } \details{ An object of class \code{"linfun"} represents a function defined on a linear network. The function \code{as.linfun} is generic. The method \code{as.linfun.linim} converts objects of class \code{"linim"} (pixel images on a linear network) to functions on the network. The method \code{as.linfun.lintess} converts a tessellation on a linear network into a function with a different value on each tile of the tessellation. The function values are specified by the argument \code{values}. 
It should be a vector with one entry for each tile of the tessellation; any point lying in tile number \code{i} will return the value \code{v[i]}. If \code{values} is missing, the marks of the tessellation are taken as the function values. If \code{values} is missing and the tessellation has no marks, or if \code{values} is given as \code{NULL}, then the function returns factor values identifying which tile contains each given point. } \value{ Object of class \code{"linfun"}. } \author{ \spatstatAuthors. } \seealso{ \code{\link{linfun}} } \examples{ X <- runiflpp(2, simplenet) Y <- runiflpp(5, simplenet) # image on network D <- density(Y, 0.1) f <- as.linfun(D) f f(X) # tessellation on network Z <- lineardirichlet(Y) g <- as.linfun(Z) g(X) h <- as.linfun(Z, values = runif(5)) h(X) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/crossdist.lpp.Rd0000644000176200001440000000560414141460471017265 0ustar liggesusers\name{crossdist.lpp} \alias{crossdist.lpp} \title{Pairwise distances between two point patterns on a linear network} \description{ Computes the distances between pairs of points taken from two different point patterns on the same linear network. } \usage{ \method{crossdist}{lpp}(X, Y, \dots, method="C", check=TRUE) } \arguments{ \item{X,Y}{ Point patterns on a linear network (objects of class \code{"lpp"}). They must lie on the \emph{same} network. } \item{\dots}{ Ignored. } \item{method}{ String specifying which method of calculation to use when the network data use the non-sparse representation. Values are \code{"C"} and \code{"interpreted"}. } \item{check}{ Logical value specifying whether to check that \code{X} and \code{Y} are defined on the same network. Default is \code{check=TRUE}. Setting \code{check=FALSE} will save time, but should only be used if it is certain that the two networks are identical. } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th point in \code{X} to the \code{j}-th point in \code{Y}. Matrix entries are nonnegative numbers or infinity (\code{Inf}). } \details{ Given two point patterns on a linear network, this function computes the distance from each point in the first pattern to each point in the second pattern, measuring distance by the shortest path along the network. This is a method for the generic function \code{\link{crossdist}} for the class of point patterns on a linear network (objects of class \code{"lpp"}). This function expects two point pattern objects \code{X} and \code{Y} on the \emph{same} linear network, and returns the matrix whose \code{[i,j]} entry is the shortest-path distance from \code{X[i]} to \code{Y[j]}. If two points cannot be joined by a path, the distance between them is infinite (\code{Inf}). The argument \code{method} is not normally used. It is retained only for developers to check the validity of the software. } \section{Algorithms and accuracy}{ Distances are accurate within the numerical tolerance of the network, \code{summary(X)$toler}. For network data stored in the non-sparse representation described in \code{\link{linnet}}, then pairwise distances are computed using the matrix of path distances between vertices of the network, using \R code if \code{method = "interpreted"}, or using C code if \code{method="C"} (the default). For networks stored in the sparse representation, the argument \code{method} has no effect, and the distances are computed using an efficient C algorithm. 
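For instance, a quick sketch on the toy network (the Examples below use a larger dataset):
\preformatted{
  X <- runiflpp(4, simplenet)
  Y <- runiflpp(3, simplenet)
  crossdist(X, Y)  # 4 x 3 matrix of shortest-path distances
}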
} \seealso{ \code{\link{crossdist}}, \code{\link{crossdist.ppp}}, \code{\link{pairdist}}, \code{\link{nndist}} } \examples{ v <- split(chicago) X <- v$cartheft Y <- v$burglary d <- crossdist(X, Y) d[1:3,1:4] } \author{ \adrian. } \keyword{spatial} \keyword{math} spatstat.linnet/man/treeprune.Rd0000644000176200001440000000306014141460471016461 0ustar liggesusers\name{treeprune} \alias{treeprune} \title{ Prune Tree to Given Level } \description{ Prune a tree by removing all the branches above a given level. } \usage{ treeprune(X, root = 1, level = 0) } \arguments{ \item{X}{ Object of class \code{"linnet"} or \code{"lpp"}. } \item{root}{ Index of the root vertex amongst the vertices of \code{as.linnet(X)}. } \item{level}{ Integer specifying the level above which the tree should be pruned. } } \details{ The object \code{X} must be either a linear network, or a derived object such as a point pattern on a linear network. The linear network must be an acyclic graph (i.e. must not contain any loops) so that it can be interpreted as a tree. This function removes all vertices for which \code{\link{treebranchlabels}} gives a string more than \code{level} characters long. } \value{ Object of the same kind as \code{X}. } \author{ \spatstatAuthors } \seealso{ \code{\link{treebranchlabels}} for calculating the branch labels. \code{\link{deletebranch}} for removing entire branches. \code{\link{extractbranch}} for extracting entire branches. \code{\link{linnet}} for creating networks. } \examples{ # make a simple tree m <- simplenet$m m[8,10] <- m[10,8] <- FALSE L <- linnet(vertices(simplenet), m) plot(L, main="") # compute branch labels tb <- treebranchlabels(L, 1) tbc <- paste0("[", tb, "]") text(vertices(L), labels=tbc, cex=2) # prune tree tp <- treeprune(L, root=1, 1) plot(tp, add=TRUE, col="blue", lwd=3) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/linearpcfdot.inhom.Rd0000644000176200001440000001046214141460471020237 0ustar liggesusers\name{linearpcfdot.inhom} \alias{linearpcfdot.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the inhomogeneous multitype pair correlation function from points of type \eqn{i} to points of any type. } \usage{ linearpcfdot.inhom(X, i, lambdaI, lambdadot, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-any pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdadot}{ Intensity values for all points of \code{X}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. There is a sensible default. 
First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{pcfdot.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. If \code{lambdaI} or \code{lambdadot} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfcross.inhom}}, \code{\link{linearpcfcross}}, \code{\link{pcfcross.inhom}}. } \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lam. <- function(x,y,const=sum(lam)){ rep(const, length(x)) } g <- linearpcfdot.inhom(chicago, "assault", lamI, lam.) # using fitted models for the intensity # fit <- lppm(chicago, ~marks + x) # linearpcfdot.inhom(chicago, "assault", fit, fit) } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/fitted.lppm.Rd0000644000176200001440000000527114141460471016704 0ustar liggesusers\name{fitted.lppm} \alias{fitted.lppm} \title{ Fitted Intensity for Point Process on Linear Network } \description{ Given a point process model fitted to a point pattern on a linear network, compute the fitted intensity of the model at the points of the pattern, or at the points of the quadrature scheme used to fit the model. } \usage{ \method{fitted}{lppm}(object, \dots, dataonly = FALSE, new.coef = NULL, leaveoneout = FALSE) } \arguments{ \item{object}{ Fitted point process model on a linear network (object of class \code{"lppm"}). } \item{\dots}{ Ignored. } \item{dataonly}{ Logical value indicating whether to compute fitted intensities at the points of the original point pattern dataset (\code{dataonly=TRUE}) or at all the quadrature points of the quadrature scheme used to fit the model (\code{dataonly=FALSE}, the default).
} \item{new.coef}{ Numeric vector of parameter values to replace the fitted model parameters \code{coef(object)}. } \item{leaveoneout}{ Logical. If \code{TRUE} the fitted value at each data point will be computed using a leave-one-out method. See Details. } } \details{ This is a method for the generic function \code{\link[stats]{fitted}} for the class \code{"lppm"} of fitted point process models on a linear network. The locations \eqn{u} at which the fitted conditional intensity/trend is evaluated are the points of the quadrature scheme used to fit the model in \code{\link{ppm}}. They include the data points (the points of the original point pattern dataset \code{x}) and other ``dummy'' points in the window of observation. If \code{leaveoneout=TRUE}, fitted values will be computed for the data points only, using a \sQuote{leave-one-out} rule: the fitted value at \code{X[i]} is effectively computed by deleting this point from the data and re-fitting the model to the reduced pattern \code{X[-i]}, then predicting the value at \code{X[i]}. (Instead of literally performing this calculation, we apply a Taylor approximation using the influence function computed in \code{\link{dfbetas.ppm}}.) } \value{ A vector containing the values of the fitted spatial trend. Entries in this vector correspond to the quadrature points (data or dummy points) used to fit the model. The quadrature points can be extracted from \code{object} by \code{union.quad(quad.ppm(object))}. } \author{ \adrian, \rolf and \ege } \seealso{ \code{\link{lppm}}, \code{\link{predict.lppm}} } \examples{ fit <- lppm(spiders~x+y) a <- fitted(fit) b <- fitted(fit, dataonly=TRUE) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat.linnet/man/densityQuick.lpp.Rd0000644000176200001440000001025214141460471017717 0ustar liggesusers\name{densityQuick.lpp} \alias{densityQuick.lpp} \title{ Kernel Estimation of Intensity on a Network using a 2D Kernel } \description{ Estimates the intensity of a point process on a linear network using a two-dimensional smoothing kernel. } \usage{ densityQuick.lpp(X, sigma=NULL, \dots, kernel="gaussian", at = c("pixels", "points"), what = c("estimate", "se", "var"), leaveoneout = TRUE, diggle = FALSE, edge2D = FALSE, weights = NULL, positive = FALSE) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } \item{sigma}{ Smoothing bandwidth. A single numeric value, in the same units as the coordinates of \code{X}. Alternatively \code{sigma} may be a function which selects a bandwidth when applied to \code{X}, for example, \code{\link{bw.scott.iso}}. } \item{\dots}{ Additional arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } \item{kernel}{ String (partially matched) specifying the smoothing kernel. Current options are \code{"gaussian"}, \code{"epanechnikov"}, \code{"quartic"} or \code{"disc"}. } \item{at}{ String (partially matched) specifying whether to compute the intensity values at a fine grid of locations on the network (\code{at="pixels"}, the default) or only at the points of \code{X} (\code{at="points"}). } \item{what}{ String (partially matched) specifying whether to calculate the intensity estimate, or its estimated standard error, or its estimated variance. } \item{leaveoneout}{ Logical value indicating whether to compute a leave-one-out estimator. Applicable only when \code{at="points"}. } \item{diggle}{ Logical value specifying whether to use the \sQuote{Diggle} correction.
} \item{edge2D}{ Logical value specifying whether to apply the usual two-dimensional edge correction procedure to the numerator and denominator of the estimate. } \item{weights}{ Optional weights to be attached to the points. A numeric vector, an \code{expression}, or a pixel image. } \item{positive}{ Logical value indicating whether to force the resulting values to be positive. Default is \code{FALSE} for the sake of speed. } } \details{ Kernel smoothing is applied to the points of \code{x} using a two-dimensional Gaussian kernel, as described in Rakshit et al (2019). The result is a pixel image on the linear network (class \code{"linim"}) which can be plotted. Other techniques for kernel smoothing on a network are implemented in \code{\link{density.lpp}}. The main advantages of using a two-dimensional kernel are very fast computation and insensitivity to changes in the network geometry. The main disadvantage is that it ignores the connectivity of the network. See Rakshit et al (2019) for further explanation. } \section{Infinite bandwidth}{ If \code{sigma=Inf}, the resulting density estimate is constant over all locations, and is equal to the average density of points per unit length. (If the network is not connected, then this rule is applied separately to each connected component of the network). } \value{ If \code{at="pixels"} (the default), a pixel image on the linear network (object of class \code{"linim"}). If \code{at="points"}, a numeric vector with one entry for each point of \code{x}. } \references{ Rakshit, S., Davies, T., Moradi, M., McSwiggan, G., Nair, G., Mateu, J. and Baddeley, A. (2019) Fast kernel smoothing of point patterns on a large network using 2D convolution. \emph{International Statistical Review} \bold{87} (3) 531--556. DOI: 10.1111/insr.12327. } \author{ Adrian Baddeley, Suman Rakshit and Tilman Davies } \seealso{ \code{\link{density.lpp}}, the main function for density estimation on a network. \code{\link{bw.scott}}, \code{\link{bw.scott.iso}} for bandwidth selection. } \examples{ X <- unmark(chicago) plot(densityQuick.lpp(X, 500)) plot(densityQuick.lpp(X, 500, diggle=TRUE)) plot(densityQuick.lpp(X, bw.scott.iso)) plot(densityQuick.lpp(X, 500, what="se")) } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/linearKcross.inhom.Rd0000644000176200001440000001104314141460471020220 0ustar liggesusers\name{linearKcross.inhom} \alias{linearKcross.inhom} \title{ Inhomogeneous multitype K Function (Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the inhomogeneous multitype \eqn{K} function which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}. } \usage{ linearKcross.inhom(X, i, j, lambdaI, lambdaJ, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. 
Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdaJ}{ Intensity values for the points of type \code{j}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{lambdaI} and \code{lambdaJ} if they are functions. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{Kcross.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. If \code{lambdaI} or \code{lambdaJ} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The arguments \code{i} and \code{j} are interpreted as levels of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearKdot}}, \code{\link{linearK}}. } \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lamJ <- function(x,y,const=lam[["robbery"]]){ rep(const, length(x)) } K <- linearKcross.inhom(chicago, "assault", "robbery", lamI, lamJ) # using fitted models for the intensity # fit <- lppm(chicago ~marks + x) # K <- linearKcross.inhom(chicago, "assault", "robbery", fit, fit) } \author{ \adrian. } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/nnfromvertex.Rd0000644000176200001440000000250314141460471017206 0ustar liggesusers\name{nnfromvertex} \alias{nnfromvertex} \title{ Nearest Data Point From Each Vertex in a Network } \description{ Given a point pattern on a linear network, for each vertex of the network find the nearest data point. 
} \usage{ nnfromvertex(X, what = c("dist", "which"), k = 1) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } \item{what}{ Character string specifying whether to return the nearest-neighbour distances, nearest-neighbour identifiers, or both. } \item{k}{ Integer, or integer vector, specifying that the \code{k}th nearest neighbour should be returned. } } \details{ For each vertex (node) of the linear network, this algorithm finds the nearest data point to the vertex, and returns either the distance from the vertex to its nearest neighbour in \code{X}, or the serial number of the nearest neighbour in \code{X}, or both. If \code{k} is an integer, then the \code{k}-th nearest neighbour is found instead. If \code{k} is an integer vector, this is repeated for each integer in \code{k}. } \value{ A numeric vector, matrix, or data frame. } \author{ \adrian. } \seealso{ \code{\link{nndist.lpp}} } \examples{ X <- runiflpp(5, simplenet) nnfromvertex(X) nnfromvertex(X, k=1:3) } \keyword{spatial} \keyword{math} spatstat.linnet/man/rhohat.lpp.Rd0000644000176200001440000004034714141460471016540 0ustar liggesusers\name{rhohat.lpp} \alias{rhohat.lpp} \alias{rhohat.lppm} \concept{Resource Selection Function} \concept{Prospectivity} \title{ Nonparametric Estimate of Intensity as Function of a Covariate } \description{ Computes a nonparametric estimate of the intensity of a point process on a linear network, as a function of a (continuous) spatial covariate. } \usage{ \method{rhohat}{lpp}(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "piecewise"), subset=NULL, nd=1000, eps=NULL, random=TRUE, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95, positiveCI, breaks=NULL) \method{rhohat}{lppm}(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "piecewise"), subset=NULL, nd=1000, eps=NULL, random=TRUE, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95, positiveCI, breaks=NULL) } \arguments{ \item{object}{ A point pattern on a linear network (object of class \code{"lpp"}), or a fitted point process model on a linear network (object of class \code{"lppm"}). } \item{covariate}{ Either a \code{function(x,y)} or a pixel image (object of class \code{"im"} or \code{"linim"}) providing the values of the covariate at any location. Alternatively one of the strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. } \item{weights}{ Optional weights attached to the data points. Either a numeric vector of weights for each data point, or a pixel image (object of class \code{"im"}) or a \code{function(x,y)} providing the weights. } \item{method}{ Character string determining the smoothing method. See Details. } \item{horvitz}{ Logical value indicating whether to use Horvitz-Thompson weights. See Details. } \item{smoother}{ Character string determining the smoothing algorithm. See Details. } \item{subset}{ Optional. A spatial window (object of class \code{"owin"}) specifying a subset of the data, from which the estimate should be calculated. } \item{eps,nd,random}{ Arguments controlling the pixel resolution at which the covariate will be evaluated. See Details. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link{density.default}}). 
} \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link{density.default}}). } \item{n, from, to}{ Arguments passed to \code{\link{density.default}} to control the number and range of values at which the function will be estimated. } \item{bwref}{ Optional. An alternative value of \code{bw} to use when smoothing the reference density (the density of the covariate values observed at all locations in the window). } \item{\dots}{ Additional arguments passed to \code{\link{density.default}} or \code{locfit::\link[locfit]{locfit}}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } \item{confidence}{ Confidence level for confidence intervals. A number between 0 and 1. } \item{positiveCI}{ Logical value. If \code{TRUE}, confidence limits are always positive numbers; if \code{FALSE}, the lower limit of the confidence interval may sometimes be negative. Default is \code{FALSE} if \code{smoother="kernel"} and \code{TRUE} if \code{smoother="local"}. See Details. } \item{breaks}{ Breakpoints for the piecewise-constant function computed when \code{smoother='piecewise'}. Either a vector of numeric values specifying the breakpoints, or a single integer specifying the number of equally-spaced breakpoints. There is a sensible default. } } \details{ This command estimates the relationship between point process intensity and a given spatial covariate. Such a relationship is sometimes called a \emph{resource selection function} (if the points are organisms and the covariate is a descriptor of habitat) or a \emph{prospectivity index} (if the points are mineral deposits and the covariate is a geological variable). This command uses nonparametric methods which do not assume a particular form for the relationship. If \code{object} is a point pattern, and \code{baseline} is missing or null, this command assumes that \code{object} is a realisation of a point process with intensity function \eqn{\lambda(u)}{lambda(u)} of the form \deqn{\lambda(u) = \rho(Z(u))}{lambda(u) = rho(Z(u))} where \eqn{Z} is the spatial covariate function given by \code{covariate}, and \eqn{\rho(z)}{rho(z)} is the resource selection function or prospectivity index. A nonparametric estimator of the function \eqn{\rho(z)}{rho(z)} is computed. If \code{object} is a point pattern, and \code{baseline} is given, then the intensity function is assumed to be \deqn{\lambda(u) = \rho(Z(u)) B(u)}{lambda(u) = rho(Z(u)) * B(u)} where \eqn{B(u)} is the baseline intensity at location \eqn{u}. A nonparametric estimator of the relative intensity \eqn{\rho(z)}{rho(z)} is computed. If \code{object} is a fitted point process model, suppose \code{X} is the original data point pattern to which the model was fitted. Then this command assumes \code{X} is a realisation of a Poisson point process with intensity function of the form \deqn{ \lambda(u) = \rho(Z(u)) \kappa(u) }{ lambda(u) = rho(Z(u)) * kappa(u) } where \eqn{\kappa(u)}{kappa(u)} is the intensity of the fitted model \code{object}. A nonparametric estimator of the relative intensity \eqn{\rho(z)}{rho(z)} is computed. The nonparametric estimation procedure is controlled by the arguments \code{smoother}, \code{method} and \code{horvitz}. The argument \code{smoother} selects the type of estimation technique. \itemize{ \item If \code{smoother="kernel"} (the default) or \code{smoother="local"}, the nonparametric estimator is a \emph{smoothing estimator} of \eqn{\rho(z)}{rho(z)}, effectively a kind of density estimator (Baddeley et al, 2012). 
The estimated function \eqn{\rho(z)}{rho(z)} will be a smooth function of \eqn{z}. Confidence bands are also computed, assuming a Poisson point process. See the section on \emph{Smooth estimates}. \item If \code{smoother="increasing"} or \code{smoother="decreasing"}, we use the \emph{nonparametric maximum likelihood estimator} of \eqn{\rho(z)}{rho(z)} described by Sager (1982). This assumes that \eqn{\rho(z)}{rho(z)} is either an increasing function of \eqn{z}, or a decreasing function of \eqn{z}. The estimated function will be a step function, increasing or decreasing as a function of \eqn{z}. See the section on \emph{Monotone estimates}. \item If \code{smoother="piecewise"}, the estimate of \eqn{\rho(z)}{rho(z)} is piecewise constant. The range of covariate values is divided into several intervals (ranges or bands). The endpoints of these intervals are the breakpoints, which may be specified by the argument \code{breaks}; there is a sensible default. The estimate of \eqn{\rho(z)}{rho(z)} takes a constant value on each interval. The estimate of \eqn{\rho(z)}{rho(z)} in each interval of covariate values is simply the average intensity (number of points per unit length) in the relevant sub-region of the network. } See Baddeley (2018) for a comparison of these estimation techniques for two-dimensional point patterns. If the argument \code{weights} is present, then the contribution from each data point \code{X[i]} to the estimate of \eqn{\rho}{rho} is multiplied by \code{weights[i]}. If the argument \code{subset} is present, then the calculations are performed using only the data inside this spatial region. This technique assumes that \code{covariate} has continuous values. It is not applicable to covariates with categorical (factor) values or discrete values such as small integers. % For a categorical covariate, use % \code{\link{intensity.quadratcount}} applied to the result of % \code{\link{quadratcount}(X, tess=covariate)}. The argument \code{covariate} should be a pixel image, or a function, or one of the strings \code{"x"} or \code{"y"} signifying the cartesian coordinates. It will be evaluated on a fine grid of locations, with spatial resolution controlled by the arguments \code{eps,nd,random}. The argument \code{nd} specifies the total number of test locations on the linear network, \code{eps} specifies the linear separation between test locations, and \code{random} specifies whether the test locations have a randomised starting position. } \section{Smooth estimates}{ Smooth estimators of \eqn{\rho(z)}{rho(z)} were proposed by Baddeley and Turner (2005) and Baddeley et al (2012). Similar estimators were proposed by Guan (2008) and in the literature on relative distributions (Handcock and Morris, 1999). The estimated function \eqn{\rho(z)}{rho(z)} will be a smooth function of \eqn{z}. The smooth estimation procedure involves computing several density estimates and combining them. The algorithm used to compute density estimates is determined by \code{smoother}: \itemize{ \item If \code{smoother="kernel"}, the smoothing procedure is based on fixed-bandwidth kernel density estimation, performed by \code{\link{density.default}}. \item If \code{smoother="local"}, the smoothing procedure is based on local likelihood density estimation, performed by \code{locfit::\link[locfit]{locfit}}. 
} The argument \code{method} determines how the density estimates will be combined to obtain an estimate of \eqn{\rho(z)}{rho(z)}: \itemize{ \item If \code{method="ratio"}, then \eqn{\rho(z)}{rho(z)} is estimated by the ratio of two density estimates. The numerator is a (rescaled) density estimate obtained by smoothing the values \eqn{Z(y_i)}{Z(y[i])} of the covariate \eqn{Z} observed at the data points \eqn{y_i}{y[i]}. The denominator is a density estimate of the reference distribution of \eqn{Z}. See Baddeley et al (2012), equation (8). This is similar but not identical to an estimator proposed by Guan (2008). \item If \code{method="reweight"}, then \eqn{\rho(z)}{rho(z)} is estimated by applying density estimation to the values \eqn{Z(y_i)}{Z(y[i])} of the covariate \eqn{Z} observed at the data points \eqn{y_i}{y[i]}, with weights inversely proportional to the reference density of \eqn{Z}. See Baddeley et al (2012), equation (9). \item If \code{method="transform"}, the smoothing method is variable-bandwidth kernel smoothing, implemented by applying the Probability Integral Transform to the covariate values, yielding values in the range 0 to 1, then applying edge-corrected density estimation on the interval \eqn{[0,1]}, and back-transforming. See Baddeley et al (2012), equation (10). } If \code{horvitz=TRUE}, then the calculations described above are modified by using Horvitz-Thompson weighting. The contribution to the numerator from each data point is weighted by the reciprocal of the baseline value or fitted intensity value at that data point; and a corresponding adjustment is made to the denominator. Pointwise confidence intervals for the true value of \eqn{\rho(z)}{rho(z)} are also calculated for each \eqn{z}, and will be plotted as grey shading. The confidence intervals are derived using the central limit theorem, based on variance calculations which assume a Poisson point process. If \code{positiveCI=FALSE}, the lower limit of the confidence interval may sometimes be negative, because the confidence intervals are based on a normal approximation to the estimate of \eqn{\rho(z)}{rho(z)}. If \code{positiveCI=TRUE}, the confidence limits are always positive, because the confidence interval is based on a normal approximation to the estimate of \eqn{\log(\rho(z))}{log(rho(z))}. For consistency with earlier versions, the default is \code{positiveCI=FALSE} for \code{smoother="kernel"} and \code{positiveCI=TRUE} for \code{smoother="local"}. } \section{Monotone estimates}{ The nonparametric maximum likelihood estimator of a monotone function \eqn{\rho(z)}{rho(z)} was described by Sager (1982). This method assumes that \eqn{\rho(z)}{rho(z)} is either an increasing function of \eqn{z}, or a decreasing function of \eqn{z}. The estimated function will be a step function, increasing or decreasing as a function of \eqn{z}. This estimator is chosen by specifying \code{smoother="increasing"} or \code{smoother="decreasing"}. The argument \code{method} is ignored in this case. To compute the estimate of \eqn{\rho(z)}{rho(z)}, the algorithm first computes several primitive step-function estimates, and then takes the maximum of these primitive functions. If \code{smoother="decreasing"}, each primitive step function takes the form \eqn{\rho(z) = \lambda}{rho(z) = lambda} when \eqn{z \le t}, and \eqn{\rho(z) = 0}{rho(z) = 0} when \eqn{z > t}, where \eqn{\lambda}{lambda} is a primitive estimate of intensity based on the data for \eqn{Z \le t}{Z <= t}.
The jump location \eqn{t} will be the value of the covariate \eqn{Z} at one of the data points. The primitive estimate \eqn{\lambda}{lambda} is the average intensity (number of points divided by area) for the region of space where the covariate value is less than or equal to \eqn{t}. If \code{horvitz=TRUE}, then the calculations described above are modified by using Horvitz-Thompson weighting. The contribution to the numerator from each data point is weighted by the reciprocal of the baseline value or fitted intensity value at that data point; and a corresponding adjustment is made to the denominator. Confidence intervals are not available for the monotone estimators. } \value{ A function value table (object of class \code{"fv"}) containing the estimated values of \eqn{\rho}{rho} (and confidence limits) for a sequence of values of \eqn{Z}. Also belongs to the class \code{"rhohat"} which has special methods for \code{print}, \code{plot} and \code{predict}. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2012) Nonparametric estimation of the dependence of a point process on spatial covariates. \emph{Statistics and Its Interface} \bold{5} (2), 221--236. Baddeley, A. and Turner, R. (2005) Modelling spatial point patterns in R. In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan, editors, \emph{Case Studies in Spatial Point Pattern Modelling}, Lecture Notes in Statistics number 185. Pages 23--74. Springer-Verlag, New York, 2006. ISBN: 0-387-28311-0. Baddeley, A. (2018) A statistical commentary on mineral prospectivity analysis. Chapter 2, pages 25--65 in \emph{Handbook of Mathematical Geosciences: Fifty Years of IAMG}, edited by B.S. Daya Sagar, Q. Cheng and F.P. Agterberg. Springer, Berlin. Guan, Y. (2008) On consistent nonparametric intensity estimation for inhomogeneous spatial point processes. \emph{Journal of the American Statistical Association} \bold{103}, 1238--1247. Handcock, M.S. and Morris, M. (1999) \emph{Relative Distribution Methods in the Social Sciences}. Springer, New York. Sager, T.W. (1982) Nonparametric maximum likelihood estimation of spatial patterns. \emph{Annals of Statistics} \bold{10}, 1125--1136. } \author{ Smoothing algorithm by \adrian, Ya-Mei Chang, Yong Song, and \rolf. Nonparametric maximum likelihood algorithm by \adrian. } \seealso{ \code{\link[spatstat.core]{rho2hat}}, \code{\link[spatstat.core]{methods.rhohat}}, \code{\link[spatstat.core]{parres}}. See \code{\link{lppm}} for a parametric method for the same problem. } \examples{ Y <- runiflpp(30, simplenet) rhoY <- rhohat(Y, "y") ## do spiders prefer to be in the middle of a segment? teepee <- linfun(function(x,y,seg,tp){ tp }, domain(spiders)) rhotee <- rhohat(spiders, teepee) } \keyword{spatial} \keyword{models} \keyword{nonparametric} spatstat.linnet/man/superimpose.lpp.Rd0000644000176200001440000000501614141460471017620 0ustar liggesusers\name{superimpose.lpp} \alias{superimpose.lpp} \title{Superimpose Several Point Patterns on Linear Network} \description{ Superimpose any number of point patterns on the same linear network. } \usage{ \method{superimpose}{lpp}(\dots, L=NULL) } \arguments{ \item{\dots}{ Any number of arguments, each of which represents a point pattern on the same linear network. Each argument can be either an object of class \code{"lpp"}, giving both the spatial coordinates of the points and the linear network, or a \code{list(x,y)} or \code{list(x,y,seg,tp)} giving just the spatial coordinates of the points. } \item{L}{ Optional. The linear network. 
An object of class \code{"linnet"}. This argument is required if none of the other arguments is of class \code{"lpp"}. } } \value{ An object of class \code{"lpp"} representing the combined point pattern on the linear network. } \details{ This function is used to superimpose several point patterns on the same linear network. It is a method for the generic function \code{\link{superimpose}}. Each of the arguments \code{\dots} can be either a point pattern on a linear network (object of class \code{"lpp"} giving both the spatial coordinates of the points and the linear network), or a \code{list(x,y)} or \code{list(x,y,seg,tp)} giving just the spatial coordinates of the points. These arguments must represent point patterns on the \emph{same} linear network. The argument \code{L} is an alternative way to specify the linear network, and is required if none of the arguments \code{\dots} is an object of class \code{"lpp"}. The arguments \code{\dots} may be \emph{marked} patterns. The marks of each component pattern must have the same format. Numeric and character marks may be ``mixed''. If there is such mixing then the numeric marks are coerced to character in the combining process. If the mark structures are all data frames, then these data frames must have the same number of columns and identical column names. If the arguments \code{\dots} are given in the form \code{name=value}, then the \code{name}s will be used as an extra column of marks attached to the elements of the corresponding patterns. } \seealso{ \code{\link{superimpose}} } \examples{ X <- rpoislpp(5, simplenet) Y <- rpoislpp(10, simplenet) superimpose(X,Y) # not marked superimpose(A=X, B=Y) # multitype with types A and B } \author{\adrian \rolf \ege and Greg McSwiggan. } \keyword{spatial} \keyword{manip} spatstat.linnet/man/points.lpp.Rd0000644000176200001440000000364714141460471016571 0ustar liggesusers\name{points.lpp} \alias{points.lpp} \title{ Draw Points on Existing Plot } \description{ For a point pattern on a linear network, this function draws the coordinates of the points only, on the existing plot display. } \usage{ \method{points}{lpp}(x, \dots) } \arguments{ \item{x}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Additional arguments passed to \code{\link[graphics]{points.default}}. } } \details{ This is a method for the generic function \code{\link[graphics]{points}} for the class \code{"lpp"} of point patterns on a linear network. If \code{x} is a point pattern on a linear network, then \code{points(x)} plots the spatial coordinates of the points only, on the existing plot display, without plotting the underlying network. It is an error to call this function if a plot has not yet been initialised. The spatial coordinates are extracted and passed to \code{\link[graphics]{points.default}} along with any extra arguments. Arguments controlling the colours and the plot symbols are interpreted by \code{\link[graphics]{points.default}}. For example, if the argument \code{col} is a vector, then the \code{i}th point is drawn in the colour \code{col[i]}. } \section{Difference from plot method}{ The more usual way to plot the points is using \code{\link{plot.lpp}}. For example \code{plot(x)} would plot both the points and the underlying network, while \code{plot(x, add=TRUE)} would plot only the points. The interpretation of arguments controlling the colours and plot symbols is different here: they determine a symbol map, as explained in the help for \code{\link{plot.ppp}}. } \value{ Null. 
} \author{ \spatstatAuthors. } \seealso{ \code{\link{plot.lpp}}, \code{\link[graphics]{points.default}} } \examples{ plot(Frame(spiders), main="Spiders on a Brick Wall") points(spiders) } \keyword{spatial} \keyword{hplot} spatstat.linnet/man/tile.lengths.Rd0000644000176200001440000000171414141460471017054 0ustar liggesusers\name{tile.lengths} \alias{tile.lengths} \title{Compute Lengths of Tiles in a Tessellation on a Network} \description{ Computes the length of each tile in a tessellation on a linear network. } \usage{ tile.lengths(x) } \arguments{ \item{x}{A tessellation on a linear network (object of class \code{"lintess"}).} } \details{ A tessellation on a linear network \code{L} is a partition of the network into non-overlapping pieces (tiles). Each tile consists of one or more line segments which are subsets of the line segments making up the network. A tile can consist of several disjoint pieces. This command computes the length of each of the tiles that make up the tessellation \code{x}. The result is a numeric vector. } \value{ A numeric vector. } \seealso{ \code{\link{lintess}} } \examples{ X <- runiflpp(5, simplenet) A <- lineardirichlet(X) plot(A) tile.lengths(A) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat.linnet/man/as.owin.lpp.Rd0000644000176200001440000000477414141460471016635 0ustar liggesusers\name{as.owin.lpp} \alias{as.owin.lpp} \alias{as.owin.lppm} \title{Convert Data on a Network to class owin} \description{ Converts data on a linear network into an object of class \code{"owin"}. } \usage{ \method{as.owin}{lpp}(W, \dots, fatal=TRUE) \method{as.owin}{lppm}(W, \dots, fatal=TRUE) } \arguments{ \item{W}{ Data specifying an observation window, in any of several formats described under \emph{Details} below. } \item{fatal}{ Logical value determining what to do if the data cannot be converted to an observation window. See Details. } \item{\dots}{Ignored.} } \value{ An object of class \code{"owin"} (see \code{\link[spatstat.geom]{owin.object}}) specifying an observation window. } \details{ The class \code{"owin"} is a way of specifying the observation window for a point pattern. See \code{\link[spatstat.geom]{owin.object}} for an overview. The function \code{\link[spatstat.geom]{as.owin}} converts data in any of several formats into an object of class \code{"owin"} for use by the \pkg{spatstat} package. The function \code{\link[spatstat.geom]{as.owin}} is generic, with methods for different classes of objects, and a default method. A long list of methods for \code{\link[spatstat.geom]{as.owin}} is documented in the help file for \code{\link[spatstat.geom]{as.owin}} in the \pkg{spatstat.geom} package. This help file documents additional methods applicable when \code{W} is \itemize{ \item an object of class \code{"lpp"} representing a point pattern on a linear network. In this case, \code{as.owin} extracts the linear network and returns a window containing this network. \item an object of class \code{"lppm"} representing a fitted point process model on a linear network. In this case, \code{as.owin} extracts the linear network and returns a window containing this network. } If the argument \code{W} cannot be converted to a window, then an error will be generated (if \code{fatal=TRUE}) or a value of \code{NULL} will be returned (if \code{fatal=FALSE}). } \seealso{ \code{\link[spatstat.geom]{as.owin}}, \code{\link[spatstat.geom]{owin.object}}, \code{\link[spatstat.geom]{owin}}. 
Additional methods for \code{as.owin} are provided in the \pkg{maptools} package: \code{as.owin.SpatialPolygon}, \code{as.owin.SpatialPolygons}, \code{as.owin.SpatialPolygonsDataFrame}. } \examples{ as.owin(simplenet) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat.linnet/man/linearpcfinhom.Rd0000644000176200001440000001213414141460471017450 0ustar liggesusers\name{linearpcfinhom} \alias{linearpcfinhom} \title{ Inhomogeneous Linear Pair Correlation Function } \description{ Computes an estimate of the inhomogeneous linear pair correlation function for a point pattern on a linear network. } \usage{ linearpcfinhom(X, lambda=NULL, r=NULL, ..., correction="Ang", normalise=TRUE, normpower=1, update = TRUE, leaveoneout = TRUE, ratio = FALSE) } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{lambda}{ Intensity values for the point pattern. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{ Optional. Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link{density.default}} to control the smoothing. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the data points, raised to \code{normpower}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See explanation in \code{\link{linearKinhom}}. } \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"lppm"} or \code{"ppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.lppm}} or \code{\link{update.ppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{fitted.lppm}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity, when \code{lambda} is a fitted model. Supported only when \code{update=TRUE}. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each estimate will also be saved, for use in analysing replicated point patterns. } } \details{ This command computes the inhomogeneous version of the linear pair correlation function from point pattern data on a linear network. If \code{lambda = NULL} the result is equivalent to the homogeneous pair correlation function \code{\link{linearpcf}}. If \code{lambda} is given, then it is expected to provide estimated values of the intensity of the point process at each point of \code{X}. The argument \code{lambda} may be a numeric vector (of length equal to the number of points in \code{X}), or a \code{function(x,y)} that will be evaluated at the points of \code{X} to yield numeric values, or a pixel image (object of class \code{"im"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). 
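As a minimal illustrative sketch (not part of the original examples), \code{lambda} could be supplied as a \code{function(x,y)} returning a constant intensity equal to the average number of points per unit length of the network: \preformatted{
  X <- rpoislpp(5, simplenet)
  ## constant intensity: points per unit length of the network
  lambar <- npoints(X) / volume(domain(X))
  lamfun <- function(x, y) { rep(lambar, length(x)) }
  g <- linearpcfinhom(X, lambda=lamfun)
}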
If \code{lambda} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010). The bandwidth for smoothing the pairwise distances is determined by arguments \code{\dots} passed to \code{\link{density.default}}, mainly the arguments \code{bw} and \code{adjust}. The default is to choose the bandwidth by Silverman's rule of thumb \code{bw="nrd0"} explained in \code{\link{density.default}}. } \value{ Function value table (object of class \code{"fv"}). If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{g(r)}. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian. } \references{ Ang, Q.W. (2010) Statistical methodology for spatial point patterns on a linear network. MSc thesis, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. Okabe, A. and Yamada, I. (2001) The K-function method on a network and its computational implementation. \emph{Geographical Analysis} \bold{33}, 271-290. } \seealso{ \code{\link{linearpcf}}, \code{\link{linearKinhom}}, \code{\link{lpp}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) fit <- lppm(X ~x) K <- linearpcfinhom(X, lambda=fit) plot(K) } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/Extract.linnet.Rd0000644000176200001440000000351314141460471017355 0ustar liggesusers\name{Extract.linnet} \alias{[.linnet} \title{Extract Subset of Linear Network} \description{ Extract a subset of a linear network. } \usage{ \method{[}{linnet}(x, i, \dots, snip=TRUE) } \arguments{ \item{x}{ A linear network (object of class \code{"linnet"}). } \item{i}{ Spatial window defining the subregion. An object of class \code{"owin"}. } \item{snip}{ Logical. If \code{TRUE} (the default), segments of \code{x} which cross the boundary of \code{i} will be cut by the boundary. If \code{FALSE}, these segments will be deleted. } \item{\dots}{Ignored.} } \value{ Another linear network (object of class \code{"linnet"}). } \details{ This function computes the intersection between the linear network \code{x} and the domain specified by \code{i}. This function is a method for the subset operator \code{"["} for linear networks (objects of class \code{"linnet"}). It is provided mainly for completeness. The index \code{i} should be a window. The argument \code{snip} specifies what to do with segments of \code{x} which cross the boundary of \code{i}. If \code{snip=FALSE}, such segments are simply deleted. If \code{snip=TRUE} (the default), such segments are cut into pieces by the boundary of \code{i}, and those pieces which lie inside the window \code{i} are included in the resulting network. 
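The difference can also be checked numerically; the following small sketch (complementing the graphical comparison in the examples below, and using the \code{simplenet} example network) counts the segments retained under each setting: \preformatted{
  B <- owin(c(0.1,0.7), c(0.19,0.6))
  nsegments(simplenet[B])              # crossing segments are cut at the boundary
  nsegments(simplenet[B, snip=FALSE])  # crossing segments are removed entirely
}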
} \examples{ p <- par(mfrow=c(1,2), mar=0.2+c(0,0,1,0)) B <- owin(c(0.1,0.7),c(0.19,0.6)) plot(simplenet, main="x[w, snip=TRUE]") plot(simplenet[B], add=TRUE, col="green", lwd=3) plot(B, add=TRUE, border="red", lty=3) plot(simplenet, main="x[w, snip=FALSE]") plot(simplenet[B, snip=FALSE], add=TRUE, col="green", lwd=3) plot(B, add=TRUE, border="red", lty=3) par(p) } \author{ \adrian, \rolf, \ege and Suman Rakshit. } \keyword{spatial} \keyword{manip} spatstat.linnet/man/density.linnet.Rd0000644000176200001440000000361414141460471017424 0ustar liggesusers\name{density.linnet} \alias{density.linnet} \title{Kernel Smoothing of Linear Network} \description{ Compute a kernel smoothed intensity function for the line segments of a linear network. } \usage{ \method{density}{linnet}(x, \dots) } \arguments{ \item{x}{ Linear network (object of class \code{"linnet"}) } \item{\dots}{ Arguments passed to \code{\link[spatstat.core]{density.psp}} to control the amount of smoothing and the spatial resolution of the result. } } \value{ A pixel image in two dimensions (object of class \code{"im"}) or a numeric vector. } \details{ This is the method for the generic function \code{\link[stats]{density}} for the class \code{"linnet"} (linear networks). The network \code{x} is first converted to a line segment pattern (object of class \code{"psp"}). Then the method \code{\link[spatstat.core]{density.psp}} is applied to the segment pattern. A kernel estimate of the intensity of the line segment pattern is computed. The result is the convolution of the isotropic Gaussian kernel, of standard deviation \code{sigma}, with the line segments. The intensity of a line segment pattern is the (spatially-varying) amount of segment length per unit area, expressed in the same units as the coordinates of \code{x}. If the units of \code{x} are in metres, then an intensity value of 3 means that there are 3 metres of segment length per square metre of spatial domain. See \code{\link[spatstat.core]{density.psp}} for more details. } \seealso{ \code{\link[spatstat.core]{density.psp}}, \code{\link[spatstat.geom]{im.object}}, \code{\link[stats]{density}}. } \examples{ D <- density(simplenet, 0.1) plot(D) plot(simplenet, add=TRUE, col="white") ## compare with average intensity volume(simplenet)/area(Window(simplenet)) } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.linnet/man/simulate.lppm.Rd0000644000176200001440000000403314141460471017243 0ustar liggesusers\name{simulate.lppm} \alias{simulate.lppm} \title{Simulate a Fitted Point Process Model on a Linear Network} \description{ Generates simulated realisations from a fitted Poisson point process model on a linear network. } \usage{ \method{simulate}{lppm}(object, nsim=1, ..., new.coef=NULL, progress=(nsim > 1), drop=FALSE) } \arguments{ \item{object}{ Fitted point process model on a linear network. An object of class \code{"lppm"}. } \item{nsim}{ Number of simulated realisations. } \item{progress}{ Logical flag indicating whether to print progress reports for the sequence of simulations. } \item{new.coef}{ New values for the canonical parameters of the model. A numeric vector of the same length as \code{coef(object)}. } \item{\dots}{ Arguments passed to \code{\link{predict.lppm}} to determine the spatial resolution of the image of the fitted intensity used in the simulation. } \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE}, the result will be a point pattern, rather than a list containing a point pattern. 
} } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"lppm"} of fitted point process models on a linear network. Only Poisson process models are supported so far. Simulations are performed by \code{\link{rpoislpp}}. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"lpp"}) on the same linear network as the original data used to fit the model. The result also belongs to the class \code{"solist"}, so that it can be plotted, and the class \code{"timed"}, so that the total computation time is recorded. } \examples{ fit <- lppm(unmark(chicago) ~ y) simulate(fit)[[1]] } \seealso{ \code{\link{lppm}}, \code{\link{rpoislpp}}, \code{\link[stats]{simulate}} } \author{\adrian, \rolf and \ege } \keyword{spatial} \keyword{models} spatstat.linnet/man/Extract.lpp.Rd0000644000176200001440000000655014141460471016663 0ustar liggesusers\name{Extract.lpp} \alias{[.lpp} \title{Extract Subset of Point Pattern on Linear Network} \description{ Extract a subset of a point pattern on a linear network. } \usage{ \method{[}{lpp}(x, i, j, drop=FALSE, \dots, snip=TRUE) } \arguments{ \item{x}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{i}{ Subset index. A valid subset index in the usual \R sense, indicating which points should be retained. } \item{j}{ Spatial window (object of class \code{"owin"}) delineating the region that should be retained. } \item{drop}{ Logical value indicating whether to remove unused levels of the marks, if the marks are a factor. } \item{snip}{ Logical. If \code{TRUE} (the default), segments of the network which cross the boundary of the window \code{j} will be cut by the boundary. If \code{FALSE}, these segments will be deleted. } \item{\dots}{ Ignored. } } \value{ A point pattern on a linear network (of class \code{"lpp"}). } \details{ This function extracts a designated subset of a point pattern on a linear network. The function \code{[.lpp} is a method for \code{\link{[}} for the class \code{"lpp"}. The argument \code{i} should be a subset index in the usual \R sense: either a numeric vector of positive indices (identifying the points to be retained), a numeric vector of negative indices (identifying the points to be deleted) or a logical vector of length equal to the number of points in the point pattern \code{x}. In the latter case, the points for which the corresponding entry of \code{i} is \code{TRUE} will be retained, and the others will be deleted. The argument \code{j}, if present, should be a spatial window. The points inside the region will be retained. The treatment of line segments of the network that cross the boundary of the window is controlled by the argument \code{snip}, described below. The argument \code{drop} determines whether to remove unused levels of a factor, if the point pattern is multitype (i.e. the marks are a factor) or if the marks are a data frame or hyperframe in which some of the columns are factors. The argument \code{snip} specifies what to do with segments of the network which cross the boundary of the window \code{j}. If \code{snip=FALSE}, such segments are simply deleted. If \code{snip=TRUE} (the default), such segments are cut into pieces by the boundary of \code{j}, and those pieces which lie inside the window \code{j} are included in the resulting network. Use \code{\link{unmark}} to remove all the marks in a marked point pattern, and \code{\link{subset.lpp}} to remove only some columns of marks.
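As a brief sketch (not part of the original examples; it assumes the \code{chicago} dataset, whose marks are a factor of crime types), the effect of \code{drop} on unused factor levels is: \preformatted{
  A1 <- chicago[marks(chicago) == "assault"]
  levels(marks(A1))   # all original crime types remain as levels
  A2 <- chicago[marks(chicago) == "assault", drop=TRUE]
  levels(marks(A2))   # unused levels are removed
}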
} \seealso{ \code{\link{lpp}}, \code{\link{subset.lpp}} } \examples{ # Chicago crimes data - remove cases of assault chicago[marks(chicago) != "assault"] # equivalent to subset(chicago, select=-assault) # spatial window subset B <- owin(c(350, 700), c(600, 1000)) plot(chicago) plot(B, add=TRUE, lty=2, border="red", lwd=3) op <- par(mfrow=c(1,2), mar=0.6+c(0,0,1,0)) plot(B, main="chicago[B, snip=FALSE]", lty=3, border="red") plot(chicago[, B, snip=FALSE], add=TRUE) plot(B, main="chicago[B, snip=TRUE]", lty=3, border="red") plot(chicago[, B, snip=TRUE], add=TRUE) par(op) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat.linnet/man/begins.Rd0000644000176200001440000000147414141460471015726 0ustar liggesusers\name{begins} \alias{begins} \title{ Check Start of Character String } \description{ Checks whether a character string begins with a particular prefix. } \usage{ begins(x, firstbit) } \arguments{ \item{x}{ Character string, or vector of character strings, to be tested. } \item{firstbit}{ A single character string. } } \details{ This simple wrapper function checks whether (each entry in) \code{x} begins with the string \code{firstbit}, and returns a logical value or logical vector with one entry for each entry of \code{x}. This function is useful mainly for reducing complexity in model formulae. } \value{ Logical vector of the same length as \code{x}. } \author{ \adrian \rolf and \ege } \examples{ begins(c("Hello", "Goodbye"), "Hell") begins("anything", "") } \keyword{character} spatstat.linnet/man/methods.linnet.Rd0000644000176200001440000001075514141460471017414 0ustar liggesusers\name{methods.linnet} \alias{methods.linnet} %DoNotExport \Rdversion{1.1} \alias{as.linnet} \alias{as.linnet.linnet} \alias{as.owin.linnet} \alias{as.psp.linnet} \alias{nsegments.linnet} \alias{nvertices.linnet} \alias{pixellate.linnet} \alias{print.linnet} \alias{summary.linnet} \alias{unitname.linnet} \alias{unitname<-.linnet} \alias{vertexdegree} \alias{vertices.linnet} \alias{volume.linnet} \alias{Window.linnet} \title{ Methods for Linear Networks } \description{ These are methods for the class \code{"linnet"} of linear networks. } \usage{ as.linnet(X, \dots) \method{as.linnet}{linnet}(X, \dots, sparse, maxsize=30000) \method{as.owin}{linnet}(W, \dots) \method{as.psp}{linnet}(x, \dots, fatal=TRUE) \method{nsegments}{linnet}(x) \method{nvertices}{linnet}(x, \dots) \method{pixellate}{linnet}(x, \dots) \method{print}{linnet}(x, \dots) \method{summary}{linnet}(object, \dots) \method{unitname}{linnet}(x) \method{unitname}{linnet}(x) <- value vertexdegree(x) \method{vertices}{linnet}(w) \method{volume}{linnet}(x) \method{Window}{linnet}(X, \dots) } \arguments{ \item{x,X,object,w,W}{ An object of class \code{"linnet"} representing a linear network. } \item{\dots}{ Arguments passed to other methods. } \item{value}{ A valid name for the unit of length for \code{x}. See \code{\link{unitname}}. } \item{fatal}{ Logical value indicating whether data in the wrong format should lead to an error (\code{fatal=TRUE}) or a warning (\code{fatal=FALSE}). } \item{sparse}{ Logical value indicating whether to use a sparse matrix representation, as explained in \code{\link{linnet}}. Default is to keep the same representation as in \code{X}. } \item{maxsize}{ Maximum permitted number of network vertices (to prevent a system crash due to lack of memory) when creating a network with \code{sparse=FALSE}. } } \details{ The function \code{as.linnet} is generic. 
It converts data from some other format into an object of class \code{"linnet"}. The method \code{as.linnet.lpp} extracts the linear network information from an \code{lpp} object. The method \code{as.linnet.linnet} converts a linear network into another linear network with the required format. The other functions are methods for the generic commands \code{\link{as.owin}}, \code{\link{as.psp}}, \code{\link{nsegments}}, \code{\link{nvertices}}, \code{\link{pixellate}}, \code{\link[base]{print}}, \code{\link[base]{summary}}, \code{\link{unitname}}, \code{\link{unitname<-}}, \code{\link{vertices}}, \code{\link{volume}} and \code{\link{Window}} for the class \code{"linnet"}. The methods \code{as.owin.linnet} and \code{Window.linnet} extract the window containing the linear network, and return it as an object of class \code{"owin"}. The method \code{as.psp.linnet} extracts the lines of the linear network as a line segment pattern (object of class \code{"psp"}) while \code{nsegments.linnet} simply counts the number of line segments. The method \code{vertices.linnet} extracts the vertices (nodes) of the linear network and \code{nvertices.linnet} simply counts the vertices. The function \code{vertexdegree} calculates the topological degree of each vertex (the number of lines emanating from that vertex) and returns these values as an integer vector. The method \code{pixellate.linnet} applies \code{\link{as.psp.linnet}} to convert the network to a collection of line segments, then invokes \code{\link{pixellate.psp}}. } \value{ For \code{as.linnet} the value is an object of class \code{"linnet"}. For other functions, see the help file for the corresponding generic function. } \author{ \adrian } \seealso{ \code{\link{linnet}}. Generic functions: \code{\link{as.owin}}, \code{\link{as.psp}}, \code{\link{nsegments}}, \code{\link{nvertices}}, \code{\link{pixellate}}, \code{\link{print}}, \code{\link{summary}}, \code{\link{unitname}}, \code{\link{unitname<-}}, \code{\link{vertices}}, \code{\link{volume}} and \code{\link{Window}}. Special tools: \code{\link{thinNetwork}}, \code{\link{insertVertices}}, \code{\link{joinVertices}}, \code{\link{connected.linnet}}. \code{\link{lixellate}} for dividing segments into shorter segments. } \examples{ simplenet summary(simplenet) nsegments(simplenet) nvertices(simplenet) pixellate(simplenet) volume(simplenet) unitname(simplenet) <- c("cubit", "cubits") Window(simplenet) } \keyword{spatial} \keyword{methods} spatstat.linnet/man/diameter.linnet.Rd0000644000176200001440000000247514141460471017543 0ustar liggesusers\name{diameter.linnet} \alias{boundingradius.linnet} \alias{diameter.linnet} \title{ Diameter and Bounding Radius of a Linear Network } \description{ Compute the diameter or bounding radius of a linear network measured using the shortest path distance. } \usage{ \method{diameter}{linnet}(x) \method{boundingradius}{linnet}(x, \dots) } \arguments{ \item{x}{ Linear network (object of class \code{"linnet"}). } \item{\dots}{Ignored.} } \details{ The diameter of a linear network (in the shortest path distance) is the maximum value of the shortest-path distance between any two points \eqn{u} and \eqn{v} on the network. The bounding radius of a linear network (in the shortest path distance) is the minimum value, over all points \eqn{u} on the network, of the maximum shortest-path distance from \eqn{u} to another point \eqn{v} on the network. 
The functions \code{\link{boundingradius}} and \code{\link{diameter}} are generic; the functions \code{boundingradius.linnet} and \code{diameter.linnet} are the methods for objects of class \code{linnet}. } \value{ A single numeric value. } \author{ \adrian } \seealso{ \code{\link{boundingradius}}, \code{\link{diameter}}, \code{\link{linnet}} } \examples{ diameter(simplenet) boundingradius(simplenet) } \keyword{spatial} \keyword{math} spatstat.linnet/man/spatstat.linnet-package.Rd0000644000176200001440000002054314141460471021201 0ustar liggesusers\name{spatstat.linnet-package} \alias{spatstat.linnet-package} \alias{spatstat.linnet} \docType{package} \title{The spatstat.linnet Package} \description{ The \pkg{spatstat.linnet} package belongs to the \pkg{spatstat} family of packages. It contains the functionality for analysing spatial data on a linear network. } \details{ \pkg{spatstat} is a family of \R packages for the statistical analysis of spatial data. Its main focus is the analysis of spatial patterns of points in two-dimensional space. The original \pkg{spatstat} package has now been split into several sub-packages. This sub-package \pkg{spatstat.linnet} contains the user-level functions from \pkg{spatstat} that are concerned with spatial data on a linear network. } \section{Structure of the spatstat family}{ The original \pkg{spatstat} package grew to be very large. It has now been divided into several \bold{sub-packages}: \itemize{ \item \pkg{spatstat.utils} containing basic utilities \item \pkg{spatstat.sparse} containing linear algebra utilities \item \pkg{spatstat.data} containing datasets \item \pkg{spatstat.geom} containing geometrical objects and geometrical operations \item \pkg{spatstat.core} containing the main functionality for statistical analysis and modelling of spatial data \item \pkg{spatstat.linnet} containing functions for spatial data on a linear network \item \pkg{spatstat}, which simply loads the other sub-packages listed above, and provides documentation. } When you install \pkg{spatstat}, these sub-packages are also installed. Then if you load the \pkg{spatstat} package by typing \code{library(spatstat)}, the other sub-packages listed above will automatically be loaded or imported. For an overview of all the functions available in these sub-packages, see the help file for \pkg{spatstat} in the \pkg{spatstat} package. Additionally there are several \bold{extension packages:} \itemize{ \item \pkg{spatstat.gui} for interactive graphics \item \pkg{spatstat.local} for local likelihood (including geographically weighted regression) \item \pkg{spatstat.Knet} for additional, computationally efficient code for linear networks \item \pkg{spatstat.sphere} (under development) for spatial data on a sphere, including spatial data on the earth's surface } The extension packages must be installed separately and loaded explicitly if needed. They also have separate documentation. } \section{Overview of functionality}{ Here is a list of the main functionality in \pkg{spatstat.linnet}. \bold{Point patterns on a linear network} An object of class \code{"linnet"} represents a linear network (for example, a road network).
\tabular{ll}{ \code{\link{linnet}} \tab create a linear network \cr \code{\link{clickjoin}} \tab interactively join vertices in network \cr \code{spatstat.gui::iplot.linnet} \tab interactively plot network \cr \code{\link[spatstat.data]{simplenet}} \tab simple example of network \cr \code{\link{lineardisc}} \tab disc in a linear network \cr \code{\link{delaunayNetwork}} \tab network of Delaunay triangulation \cr \code{\link{dirichletNetwork}} \tab network of Dirichlet edges \cr \code{\link{methods.linnet}} \tab methods for \code{linnet} objects\cr \code{\link{vertices.linnet}} \tab nodes of network \cr \code{\link{joinVertices}} \tab join existing vertices in a network \cr \code{\link{insertVertices}} \tab insert new vertices at positions along a network \cr \code{\link{addVertices}} \tab add new vertices, extending a network \cr \code{\link{thinNetwork}} \tab remove vertices or lines from a network \cr \code{\link{repairNetwork}} \tab repair internal format \cr \code{\link{pixellate.linnet}} \tab approximate by pixel image } An object of class \code{"lpp"} represents a point pattern on a linear network (for example, road accidents on a road network). \tabular{ll}{ \code{\link{lpp}} \tab create a point pattern on a linear network \cr \code{\link{methods.lpp}} \tab methods for \code{lpp} objects \cr \code{\link{subset.lpp}} \tab method for \code{subset} \cr \code{\link{rpoislpp}} \tab simulate Poisson points on linear network \cr \code{\link{runiflpp}} \tab simulate random points on a linear network \cr \code{\link[spatstat.data]{chicago}} \tab Chicago crime data \cr \code{\link[spatstat.data]{dendrite}} \tab Dendritic spines data \cr \code{\link[spatstat.data]{spiders}} \tab Spider webs on mortar lines of brick wall } \bold{Summary statistics for a point pattern on a linear network:} These are for point patterns on a linear network (class \code{lpp}). 
For unmarked patterns: \tabular{ll}{ \code{\link{linearK}} \tab \eqn{K} function on linear network \cr \code{\link{linearKinhom}} \tab inhomogeneous \eqn{K} function on linear network \cr \code{\link{linearpcf}} \tab pair correlation function on linear network \cr \code{\link{linearpcfinhom}} \tab inhomogeneous pair correlation on linear network } For multitype patterns: \tabular{ll}{ \code{\link{linearKcross}} \tab \eqn{K} function between two types of points \cr \code{\link{linearKdot}} \tab \eqn{K} function from one type to any type \cr \code{\link{linearKcross.inhom}} \tab Inhomogeneous version of \code{\link{linearKcross}} \cr \code{\link{linearKdot.inhom}} \tab Inhomogeneous version of \code{\link{linearKdot}} \cr \code{\link{linearmarkconnect}} \tab Mark connection function on linear network \cr \code{\link{linearmarkequal}} \tab Mark equality function on linear network \cr \code{\link{linearpcfcross}} \tab Pair correlation between two types of points \cr \code{\link{linearpcfdot}} \tab Pair correlation from one type to any type \cr \code{\link{linearpcfcross.inhom}} \tab Inhomogeneous version of \code{\link{linearpcfcross}} \cr \code{\link{linearpcfdot.inhom}} \tab Inhomogeneous version of \code{\link{linearpcfdot}} } Related facilities: \tabular{ll}{ \code{\link{pairdist.lpp}} \tab distances between pairs \cr \code{\link{crossdist.lpp}} \tab distances between pairs \cr \code{\link{nndist.lpp}} \tab nearest neighbour distances \cr \code{\link{nncross.lpp}} \tab nearest neighbour distances \cr \code{\link{nnwhich.lpp}} \tab find nearest neighbours \cr \code{\link{nnfun.lpp}} \tab find nearest data point \cr \code{\link{density.lpp}} \tab kernel smoothing estimator of intensity \cr \code{\link{distfun.lpp}} \tab distance transform \cr \code{\link{envelope.lpp}} \tab simulation envelopes \cr \code{\link{rpoislpp}} \tab simulate Poisson points on linear network \cr \code{\link{runiflpp}} \tab simulate random points on a linear network } It is also possible to fit point process models to \code{lpp} objects. \bold{Point process models on a linear network:} An object of class \code{"lpp"} represents a pattern of points on a linear network. Point process models can also be fitted to these objects. Currently only Poisson models can be fitted. \tabular{ll}{ \code{\link{lppm}} \tab point process model on linear network \cr \code{\link{anova.lppm}} \tab analysis of deviance for \cr \tab point process model on linear network \cr \code{\link{envelope.lppm}} \tab simulation envelopes for \cr \tab point process model on linear network \cr \code{\link{fitted.lppm}} \tab fitted intensity values \cr \code{\link{predict.lppm}} \tab model prediction on linear network \cr \code{\link{linim}} \tab pixel image on linear network \cr \code{\link{plot.linim}} \tab plot a pixel image on linear network \cr \code{\link{eval.linim}} \tab evaluate expression involving images \cr \code{\link{linfun}} \tab function defined on linear network \cr \code{\link{methods.linfun}} \tab conversion facilities } } \section{Licence}{ This library and its documentation are usable under the terms of the "GNU General Public License", a copy of which is distributed with the package. } \author{ \spatstatAuthors. } \section{Acknowledgements}{ Ottmar Cronie, Tilman Davies, Greg McSwiggan and Suman Rakshit made substantial contributions of code. 
} \keyword{spatial} \keyword{package} spatstat.linnet/man/anova.lppm.Rd0000644000176200001440000000733714141460471016536 0ustar liggesusers\name{anova.lppm} \alias{anova.lppm} \title{ANOVA for Fitted Point Process Models on Linear Network} \description{ Performs analysis of deviance for two or more fitted point process models on a linear network. } \usage{ \method{anova}{lppm}(object, \dots, test=NULL) } \arguments{ \item{object}{A fitted point process model on a linear network (object of class \code{"lppm"}). } \item{\dots}{ One or more fitted point process models on the same linear network. } \item{test}{ Character string, partially matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}. } } \value{ An object of class \code{"anova"}, or \code{NULL}. } \details{ This is a method for \code{\link{anova}} for fitted point process models on a linear network (objects of class \code{"lppm"}, usually generated by the model-fitting function \code{\link{lppm}}). If the fitted models are all Poisson point processes, then this function performs an Analysis of Deviance of the fitted models. The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"}) the two-sided p-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link{anova.glm}}. If some of the fitted models are \emph{not} Poisson point processes, then the deviance difference is replaced by the adjusted composite likelihood ratio (Pace et al, 2011; Baddeley et al, 2014). } \section{Errors and warnings}{ \describe{ \item{models not nested:}{ There may be an error message that the models are not \dQuote{nested}. For an Analysis of Deviance the models must be nested, i.e. one model must be a special case of the other. For example the point process model with formula \code{~x} is a special case of the model with formula \code{~x+y}, so these models are nested. However the two point process models with formulae \code{~x} and \code{~y} are not nested. If you get this error message and you believe that the models should be nested, the problem may be the inability of \R to recognise that the two formulae are nested. Try modifying the formulae to make their relationship more obvious. } \item{different sizes of dataset:}{ There may be an error message from \code{anova.glmlist} that \dQuote{models were not all fitted to the same size of dataset}. This generally occurs when the point process models are fitted on different linear networks. } } } \seealso{ \code{\link{lppm}} } \examples{ X <- runiflpp(10, simplenet) mod0 <- lppm(X ~1) modx <- lppm(X ~x) anova(mod0, modx, test="Chi") } \author{\adrian } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. Baddeley, A., Turner, R. and Rubak, E. (2015) Adjusted composite likelihood ratio test for Gibbs point processes. \emph{Journal of Statistical Computation and Simulation} \bold{86} (5) 922--941. DOI: 10.1080/00949655.2015.1044530. McSwiggan, G., Nair, M.G. and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation. Pace, L., Salvan, A. and Sartori, N. 
(2011) Adjusting composite likelihood ratio statistics. \emph{Statistica Sinica} \bold{21}, 129--148. } \keyword{spatial} \keyword{models} \keyword{methods} spatstat.linnet/man/linfun.Rd0000644000176200001440000000444514141460471015753 0ustar liggesusers\name{linfun} \Rdversion{1.1} \alias{linfun} \title{ Function on a Linear Network } \description{ Create a function on a linear network. } \usage{ linfun(f, L) } \arguments{ \item{f}{ A \code{function} in the \R language. } \item{L}{ A linear network (object of class \code{"linnet"}) on which \code{f} is defined. } } \details{ This creates an object of class \code{"linfun"}. This is a simple mechanism for handling a function defined on a linear network, to make it easier to display and manipulate. \code{f} should be a \code{function} in the \R language, with formal arguments \code{x,y,seg,tp} (and optional additional arguments) where \code{x,y} are Cartesian coordinates of locations on the linear network, \code{seg, tp} are the local coordinates. The function \code{f} should be vectorised: that is, if \code{x,y,seg,tp} are numeric vectors of the same length \code{n}, then \code{v <- f(x,y,seg,tp)} should be a vector of length \code{n}. \code{L} should be a linear network (object of class \code{"linnet"}) on which the function \code{f} is well-defined. The result is a function \code{g} in the \R language which belongs to the special class \code{"linfun"}. There are several methods for this class including \code{print}, \code{plot} and \code{\link{as.linim}}. This function can be called as \code{g(X)} where \code{X} is an \code{"lpp"} object, or called as \code{g(x,y)} or \code{g(x,y,seg,tp)} where \code{x,y,seg,tp} are coordinates. If the original function \code{f} had additional arguments, then these may be included in the call to \code{g}, and will be passed to \code{f}. } \value{ A function in the \R\ language. It also belongs to the class \code{"linfun"} which has methods for \code{plot}, \code{print} etc. } \seealso{ \code{\link{methods.linfun}} for methods applicable to \code{"linfun"} objects. \code{\link{distfun.lpp}}, \code{\link{nnfun.lpp}}. } \examples{ f <- function(x,y,seg,tp) { x+y } g <- linfun(f, simplenet) plot(g) X <- runiflpp(3, simplenet) g(X) Z <- as.linim(g) f <- function(x,y,seg,tp, mul=1) { mul*(x+y) } g <- linfun(f, simplenet) plot(g) plot(g, mul=10) g(X, mul=10) Z <- as.linim(g, mul=10) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{math} spatstat.linnet/man/bw.relrisklpp.Rd0000644000176200001440000001344414155071513017255 0ustar liggesusers\name{bw.relrisklpp} \alias{bw.relrisklpp} \title{ Cross Validated Bandwidth Selection for Relative Risk Estimation on a Network } \description{ Uses cross-validation to select a smoothing bandwidth for the estimation of relative risk on a linear network. } \usage{ bw.relrisklpp(X, \dots, method = c("likelihood", "leastsquares", "KelsallDiggle", "McSwiggan"), distance=c("path", "euclidean"), hmin = NULL, hmax = NULL, nh = NULL, fast = TRUE, fastmethod = "onestep", floored = TRUE, reference = c("thumb", "uniform", "sigma"), allow.infinite = TRUE, epsilon = 1e-20, fudge = 0, verbose = FALSE, warn = TRUE) } \arguments{ \item{X}{ A multitype point pattern on a linear network (object of class \code{"lpp"} which has factor-valued marks). } \item{\dots}{ Arguments passed to \code{\link{density.lpp}} to control the resolution of the algorithm. } \item{method}{ Character string (partially matched) determining the cross-validation method. See Details. 
} \item{distance}{ Character string (partially matched) specifying the type of smoothing kernel. See \code{\link{density.lpp}}. } \item{hmin,hmax}{ Optional. Numeric values. Range of trial values of smoothing bandwidth \code{sigma} to consider. There is a sensible default. } \item{nh}{ Number of trial values of smoothing bandwidth \code{sigma} to consider. } \item{fast}{ Logical value specifying whether the leave-one-out density estimates should be computed using a fast approximation (\code{fast=TRUE}, the default) or exactly (\code{fast=FALSE}). } \item{fastmethod, floored}{ Developer use only. } \item{reference}{ Character string (partially matched) specifying the bandwidth for calculating the reference intensities used in the McSwiggan method (modified Kelsall-Diggle method). \code{reference="sigma"} means the maximum bandwidth considered, which is given by the argument \code{hmax}. \code{reference="thumb"} means the bandwidths selected by Scott's rule of thumb \code{\link{bw.scott.iso}}. \code{reference="uniform"} means infinite bandwidth corresponding to uniform intensity. } \item{allow.infinite}{ Logical value indicating whether an infinite bandwidth (corresponding to a constant relative risk) should be permitted as a possible choice of bandwidth. } \item{epsilon}{ A small constant value added to the reference density in some of the cross-validation calculations, to improve performance. } \item{fudge}{ Fudge factor to prevent very small density estimates in the leave-one-out calculation. If \code{fudge > 0}, then the lowest permitted value for a leave-one-out estimate of intensity is \code{fudge/L}, where \code{L} is the total length of the network. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the minimum of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function computes an optimal value of smoothing bandwidth for the nonparametric estimation of relative risk on a linear network using \code{\link{relrisk.lpp}}. The optimal value is found by optimising a cross-validation criterion. The cross-validation criterion is selected by the argument \code{method}: \tabular{ll}{ \code{method="likelihood"} \tab likelihood cross-validation \cr \code{method="leastsquares"} \tab least squares cross-validation \cr \code{method="KelsallDiggle"} \tab Kelsall and Diggle (1995) density ratio cross-validation \cr \code{method="McSwiggan"} \tab McSwiggan et al (2019) modified density ratio cross-validation \cr } See McSwiggan et al (2019) for details. The result is a numerical value giving the selected bandwidth \code{sigma}. The result also belongs to the class \code{"bw.optim"} allowing it to be printed and plotted. The plot shows the cross-validation criterion as a function of bandwidth. The range of values for the smoothing bandwidth \code{sigma} is set by the arguments \code{hmin, hmax}. There is a sensible default, based on the linear network version of Scott's rule \code{\link{bw.scott.iso}}. If the optimal bandwidth is achieved at an endpoint of the interval \code{[hmin, hmax]}, the algorithm will issue a warning (unless \code{warn=FALSE}). If this occurs, then it is probably advisable to expand the interval by changing the arguments \code{hmin, hmax}. The cross-validation procedure is based on kernel estimates of intensity, which are computed by \code{\link{density.lpp}}.
Any arguments \code{...} are passed to \code{\link{density.lpp}} to control the kernel estimation procedure. This includes the argument \code{distance} which specifies the type of kernel. The default is \code{distance="path"}; the fastest option is \code{distance="euclidean"}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \references{ Kelsall, J.E. and Diggle, P.J. (1995) Kernel estimation of relative risk. \emph{Bernoulli} \bold{1}, 3--16. McSwiggan, G., Baddeley, A. and Nair, G. (2019) Estimation of relative risk for events on a linear network. \emph{Statistics and Computing} \bold{30} (2) 469--484. } \author{ Greg McSwiggan and \adrian. } \seealso{ \code{\link{relrisk.lpp}} } \examples{ set.seed(2020) X <- superimpose(A=runiflpp(20, simplenet), B=runifpointOnLines(20, as.psp(simplenet)[1])) plot(bw.relrisklpp(X, hmin=0.1, hmax=0.25, method="McSwiggan")) plot(bw.relrisklpp(X, hmin=0.1, hmax=0.3, nh=8, distance="euclidean")) } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.linnet/man/addVertices.Rd0000644000176200001440000000771114141460471016714 0ustar liggesusers\name{addVertices} \alias{addVertices} \title{ Add New Vertices to a Linear Network } \description{ Adds new vertices to a linear network at specified locations outside the network. } \usage{ addVertices(L, X, join=NULL, joinmarks=NULL) } \arguments{ \item{L}{ Existing linear network (object of class \code{"linnet"}) or point pattern on a linear network (object of class \code{"lpp"}). } \item{X}{ Point pattern (object of class \code{"ppp"}) specifying the new vertices. } \item{join}{ Optional information specifying how to join the new vertices \code{X} to the existing network. See Details. If \code{join=NULL} (the default), the new vertices are simply added to the list of network vertices without being joined to the rest of the network. } \item{joinmarks}{ Optional vector or data frame of marks associated with the new edges specified by \code{join}. } } \details{ This function adds new vertices to an existing linear network \code{L}, at specified locations \code{X} outside the network. The argument \code{L} can be either a linear network (class \code{"linnet"}) or some other object that includes a linear network. The new vertex locations are points outside the network, specified as a point pattern \code{X} (object of class \code{"ppp"}). The argument \code{join} specifies how to join the new vertices to the existing network. \itemize{ \item If \code{join=NULL} (the default), the new vertices are simply added to the list of network vertices without being joined to the rest of the network. \item If \code{join} is a vector of integers, then these are taken to be indices of existing vertices of \code{L} in the order given in \code{V = vertices(L)}. Then each new vertex \code{X[i]} will be joined to an existing vertex \code{V[j]} where \code{j = join[i]}. Each new vertex is joined to exactly one existing vertex. \item If \code{join="vertices"} then each new vertex \code{X[i]} is joined to the nearest existing vertex \code{V[j]}. Each new vertex is joined to exactly one existing vertex. \item If \code{join="nearest"} then each new vertex is projected to the nearest location on the network; these locations are inserted as new vertices of \code{L}; and then each vertex \code{X[i]} is joined to the corresponding projected point. Each new vertex is joined to exactly one newly-inserted vertex.
\item If \code{join} is a point pattern on a network (class \code{"lpp"}), it must be defined on the same network as \code{L} and it must consist of the same number of points as \code{X}. The points of \code{join} will be inserted as new vertices of \code{L}, and then each vertex \code{X[i]} is joined to the corresponding point \code{join[i]}. Each new vertex is joined to exactly one newly-inserted vertex. } The result is the modified object, with an attribute \code{"id"} such that the \code{i}th added vertex has become the \code{id[i]}th vertex of the new network. } \value{ An object of the same class as \code{L} representing the result of adding the new vertices. The result also has an attribute \code{"id"} as described in Details. } \author{ Adrian Baddeley } \seealso{ \code{\link{insertVertices}} to insert vertices along an existing network. \code{\link{as.lpp}}, \code{\link{linnet}}, \code{\link{methods.linnet}}, \code{\link{joinVertices}}, \code{\link{thinNetwork}}. } \examples{ opa <- par(mfrow=c(1,3)) L <- simplenet X <- runifpoint(20, Window(simplenet)) plot(L) plot(X, add=TRUE, cols="green", pch=16, cex=2) plot(addVertices(L, X, "nearest"), col="red") plot(L, add=TRUE, col="grey", lwd=3) plot(X, add=TRUE, cols="green", pch=16, cex=2) plot(addVertices(L, X, "vertices"), col="red") plot(L, add=TRUE, col="grey", lwd=3) plot(X, add=TRUE, cols="green", pch=16, cex=2) par(opa) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/intersect.lintess.Rd0000644000176200001440000000203214141460471020130 0ustar liggesusers\name{intersect.lintess} \alias{intersect.lintess} \title{ Intersection of Tessellations on a Linear Network } \description{ Yields the intersection (common refinement) of two tessellations on a linear network. } \usage{ intersect.lintess(X, Y) } \arguments{ \item{X,Y}{ Tessellations (objects of class \code{"lintess"}) on the same linear network. } } \value{ Another tessellation (object of class \code{"lintess"}) on the same linear network as \code{X} and \code{Y}. } \details{ \code{X} and \code{Y} should be tessellations defined on the same linear network. Each tile in the resulting tessellation is the intersection of a tile of \code{X} with a tile of \code{Y}. } \author{ \adrian. } \seealso{ \code{\link{lintess}}, \code{\link{divide.linnet}}, \code{\link{chop.linnet}} } \examples{ X <- divide.linnet(runiflpp(4, simplenet)) Y <- divide.linnet(runiflpp(3, simplenet)) opa <- par(mfrow=c(1,3)) plot(X) plot(Y) plot(intersect.lintess(X,Y)) par(opa) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/plot.lintess.Rd0000644000176200001440000000765714141460471017116 0ustar liggesusers\name{plot.lintess} \alias{plot.lintess} \title{ Plot a Tessellation on a Linear Network } \description{ Plot a tessellation or division of a linear network into tiles. } \usage{ \method{plot}{lintess}(x, \dots, main, add = FALSE, style = c("colour", "width", "image"), col = NULL, values=marks(x), ribbon=TRUE, ribargs=list(), multiplot=TRUE, do.plot=TRUE) } \arguments{ \item{x}{ Tessellation on a linear network (object of class \code{"lintess"}). } \item{\dots}{ Arguments passed to \code{\link[graphics]{segments}} (if \code{style="colour"} or \code{style="width"}) or to \code{\link{plot.im}} (if \code{style="image"}) to control the plot. } \item{main}{ Optional main title for the plot. } \item{add}{ Logical value indicating whether the plot is to be added to an existing plot. } \item{style}{ Character string (partially matched) specifying the type of plot.
If \code{style="colour"} (the default), tiles are plotted using \code{\link[graphics]{segments}} using colours to distinguish the different tiles or values. If \code{style="width"}, tiles are plotted using \code{\link[graphics]{segments}} using different segment widths to distinguish the different tiles or values. If \code{style="image"}, the tessellation is converted to a pixel image and plotted by \code{\link{plot.im}}. } \item{col}{ Vector of colours, or colour map, determining the colours used to plot the different tiles of the tessellation. } \item{values}{ Values associated with each tile of the tessellation, used to determine the colours or widths. A vector with one entry for each tile, or a data frame with one row for each tile. The default is \code{marks(x)}, or if that is null, then \code{tilenames(x)}. } \item{ribbon}{ Logical value specifying whether to print an explanatory legend for the colour map or width map. } \item{ribargs}{ Arguments passed to \code{\link{plot.colourmap}} controlling the display of the colour map legend. } \item{multiplot}{ Logical value determining what should happen if \code{marks(x)} has more than one column. If \code{multiplot=TRUE} (the default), several plot panels will be generated, one panel for each column of marks. If \code{multiplot=FALSE}, the first column of marks will be selected. } \item{do.plot}{ Logical value specifying whether to actually generate the plot (\code{do.plot=TRUE}, the default) or just to compute the colour map and return it (\code{do.plot=FALSE}). } } \details{ A tessellation on a linear network \code{L} is a partition of the network into non-overlapping pieces (tiles). Each tile consists of one or more line segments which are subsets of the line segments making up the network. A tile can consist of several disjoint pieces. This function plots the tessellation on the current device. It is a method for the generic \code{plot}. If \code{style="colour"}, each tile is plotted using \code{\link[graphics]{segments}}, drawing segments of different colours. If \code{style="width"}, each tile is plotted using \code{\link[graphics]{segments}}, drawing segments of different widths. If \code{style="image"}, the tessellation is converted to a pixel image, and plotted as a colour image using \code{\link{plot.im}}. The colours or widths are determined by the \code{values} associated with each tile of the tessellation. If \code{values} is missing, the default is to use the marks of the tessellation, or if there are no marks, the names of the tiles. } \value{ (Invisible) colour map. } \author{ \adrian } \seealso{ \code{\link{lintess}} } \examples{ X <- runiflpp(7, simplenet) Z <- divide.linnet(X) plot(Z, main="tessellation on network") points(as.ppp(X)) plot(Z, main="tessellation on network", values=1:nobjects(Z), style="w") } \keyword{spatial} \keyword{hplot} spatstat.linnet/man/rcelllpp.Rd0000644000176200001440000000556214141460471016276 0ustar liggesusers\name{rcelllpp} \alias{rcelllpp} \title{ Simulate Cell Process on Linear Network } \description{ Generate a realisation of the cell process on a linear network. } \usage{ rcelllpp(L, lambda, rnumgen = NULL, \dots, saveid=FALSE) } \arguments{ \item{L}{ Either a linear network (object of class \code{"linnet"}) or a tessellation on a linear network (object of class \code{"lintess"}). } \item{lambda}{ Intensity of the process (expected number of points per unit length), } \item{rnumgen}{ Optional. Random number generator for the number of points in each cell. 
} \item{\dots}{ Additional arguments to \code{rnumgen}. } \item{saveid}{ Logical value indicating whether to save information about cell membership. } } \details{ This function generates simulated realisations of a cell point process on a network, as described in Baddeley et al (2017). This is the analogue on a linear network of the two-dimensional cell point process of Baddeley and Silverman (1984). The argument \code{L} should be a tessellation on a linear network. Alternatively if \code{L} is a linear network, it is converted to a tessellation by treating each network segment as a tile in the tessellation. The cell process generates a point process by generating independent point processes inside each tile of the tessellation. Within each tile, given the number of random points in the tile, the points are independent and uniformly distributed within the tile. By default (when \code{rnumgen} is not given), the number of points in a tile of length \code{t} is a random variable with mean and variance equal to \code{lambda * t}, generated by calling \code{\link{rcellnumber}}. If \code{rnumgen} is given, it should be a function with arguments \code{rnumgen(n, mu, \dots)} where \code{n} is the number of random integers to be generated, \code{mu} is the mean value of the distribution, and \code{\dots} are additional arguments, if needed. It will be called in the form \code{rnumgen(1, lambda * t, \dots)} to determine the number of random points falling in each tile of length \code{t}. } \value{ Point pattern on a linear network (object of class \code{"lpp"}). If \code{saveid=TRUE}, the result has an attribute \code{"cellid"} which is a factor specifying the cell that contains each point. } \author{ \adrian. } \seealso{ \code{\link{rSwitzerlpp}} } \references{ Baddeley, A.J. and Silverman, B.W. (1984) A cautionary example on the use of second-order methods for analyzing point patterns. \emph{Biometrics} \bold{40}, 1089--1094. Baddeley, A., Nair, G., Rakshit, S. and McSwiggan, G. (2017) \sQuote{Stationary} point processes are uncommon on linear networks. \emph{STAT} \bold{6}, 68--78. } \examples{ X <- rcelllpp(domain(spiders), 0.01) plot(X) plot(linearK(X)) } \keyword{spatial} \keyword{datagen} spatstat.linnet/man/Replace.linim.Rd0000644000176200001440000000345414141460471017141 0ustar liggesusers\name{Replace.linim} \alias{[<-.linim} \title{Reset Values in Subset of Image on Linear Network} \description{ Reset the values in a subset of a pixel image on a linear network. } \usage{ \method{[}{linim}(x, i, j) <- value } \arguments{ \item{x}{ A pixel image on a linear network. An object of class \code{"linim"}. } \item{i}{ Object defining the subregion or subset to be replaced. Either a spatial window (an object of class \code{"owin"}), or a pixel image with logical values, or a point pattern (an object of class \code{"ppp"}), or any type of index that applies to a matrix, or something that can be converted to a point pattern by \code{\link{as.ppp}} (using the window of \code{x}). } \item{j}{ An integer or logical vector serving as the column index if matrix indexing is being used. Ignored if \code{i} is appropriate to some sort of replacement \emph{other than} matrix indexing. } \item{value}{ Vector, matrix, factor or pixel image containing the replacement values. Short vectors will be recycled. } } \value{ The image \code{x} with the values replaced. } \details{ This function changes some of the pixel values in a pixel image.
The image \code{x} must be an object of class \code{"linim"} representing a pixel image on a linear network. The pixel values are replaced according to the rules described in the help for \code{\link{[<-.im}}. Then the auxiliary data are updated. } \seealso{ \code{\link{[<-.im}}. } \examples{ # make a function Y <- as.linim(distfun(runiflpp(5, simplenet))) # replace some values B <- square(c(0.25, 0.55)) Y[B] <- 2 plot(Y, main="") plot(B, add=TRUE, lty=3) X <- runiflpp(4, simplenet) Y[X] <- 5 } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} spatstat.linnet/man/linearK.Rd0000644000176200001440000000406414141460471016042 0ustar liggesusers\name{linearK} \alias{linearK} \title{ Linear K Function } \description{ Computes an estimate of the linear \eqn{K} function for a point pattern on a linear network. } \usage{ linearK(X, r=NULL, ..., correction="Ang", ratio=FALSE) } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{r}{ Optional. Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Ignored. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns. } } \details{ This command computes the linear \eqn{K} function from point pattern data on a linear network. If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. The result is the network \eqn{K} function as defined by Okabe and Yamada (2001). If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010; Ang et al, 2012). } \value{ Function value table (object of class \code{"fv"}). } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian. } \references{ Ang, Q.W. (2010) Statistical methodology for spatial point patterns on a linear network. MSc thesis, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. Okabe, A. and Yamada, I. (2001) The K-function method on a network and its computational implementation. \emph{Geographical Analysis} \bold{33}, 271--290. } \seealso{ \code{\link{compileK}}, \code{\link{lpp}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) linearK(X) linearK(X, correction="none") } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/heatkernelapprox.Rd0000644000176200001440000000335114141460471020027 0ustar liggesusers\name{heatkernelapprox} \alias{heatkernelapprox} \title{ Approximation to Heat Kernel on Linear Network at Source Point } \description{ Computes an approximation to the value of the heat kernel on a network evaluated at its source location. } \usage{ heatkernelapprox(X, sigma, nmax = 20, floored=TRUE) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } \item{sigma}{ Numeric. Bandwidth for kernel. } \item{nmax}{ Number of terms to be used in the sum. } \item{floored}{ Logical. If \code{TRUE}, all values are constrained to be greater than or equal to \eqn{1/L} where \eqn{L} is the total length of the network. This is the exact value of the heat kernel when the bandwidth is infinite.
} } \details{ For each point \code{X[i]} in the pattern \code{X}, this algorithm computes an approximation to the value of the heat kernel with source point \code{X[i]} evaluated at the same location. The heat kernel \eqn{\kappa(u,v)} for a source location \eqn{u} evaluated at location \eqn{v} can be expressed as an infinite sum of contributions from all possible paths from \eqn{u} to \eqn{v}. This algorithm applies to the special case \eqn{u=v} where the source point and the query point are the same. The algorithm computes an approximation to \eqn{\kappa(u,u)} by taking only the contributions from paths which (a) remain in the line segment containing the point \eqn{u} and (b) visit a vertex at most \code{nmax} times. } \value{ Numeric vector with one entry for each point in \code{X}. } \author{ Greg McSwiggan and \adrian. } \seealso{ \code{\link{hotrod}} } \examples{ X <- runiflpp(3,simplenet) heatkernelapprox(X, 0.5) } \keyword{math} spatstat.linnet/man/rjitter.lpp.Rd0000644000176200001440000000322014141460471016723 0ustar liggesusers\name{rjitter.lpp} \alias{rjitter.lpp} \title{Random Perturbation of a Point Pattern on a Network} \description{ Applies independent random displacements to each point in a point pattern on a network. } \usage{ \method{rjitter}{lpp}(X, radius, \dots, nsim = 1, drop = TRUE) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{radius}{ Scale of perturbations. A positive numerical value. Each point will be displaced by a random distance, with maximum displacement equal to this value. } \item{\dots}{ Ignored. } \item{nsim}{Number of simulated realisations to be generated.} \item{drop}{ Logical. If \code{nsim=1} and \code{drop=TRUE} (the default), the result will be a point pattern, rather than a list containing a point pattern. } } \details{ The function \code{\link[spatstat.geom]{rjitter}} is generic. This function is the method for the class \code{"lpp"} of point patterns on a linear network. Each of the points in \code{X} will be displaced along the network by a random amount, independently of other points. The maximum displacement distance is specified by \code{radius}. Each point remains on the same line segment of the network as it originally was. } \value{ A point pattern on a linear network (object of class \code{"lpp"}) or a list of such point patterns. } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.geom]{rjitter}} for point patterns in two dimensions. } \examples{ X <- runiflpp(3, simplenet) plot(X, pch=16) Y <- rjitter(X, 0.1) plot(Y, add=TRUE, cols=3) } \keyword{spatial} \keyword{datagen} spatstat.linnet/man/model.matrix.lppm.Rd0000644000176200001440000000472614141460471020034 0ustar liggesusers\name{model.matrix.lppm} \alias{model.matrix.lppm} \title{Extract Design Matrix from Point Process Model on a Network} \description{ Given a point process model that has been fitted to spatial point pattern data on a linear network, this function extracts the design matrix of the model. } \usage{ \method{model.matrix}{lppm}(object, data=model.frame(object, na.action=NULL), \dots, keepNA=TRUE) } \arguments{ \item{object}{ The fitted point process model. An object of class \code{"lppm"}. } \item{data}{ A model frame, containing the data required for the Berman-Turner device. } \item{keepNA}{ Logical. Determines whether rows containing NA values will be deleted or retained. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link[stats:model.matrix]{model.matrix.lm}}. 
} } \details{ This is a method for the generic function \code{\link[stats]{model.matrix}}. It extracts the design matrix of a spatial point process model on a linear network (object of class \code{"lppm"}). More precisely, this command extracts the design matrix of the generalised linear model associated with a spatial point process model. The \code{object} must be a fitted point process model on a network (object of class \code{"lppm"}) produced by the model-fitting function \code{\link{lppm}}. The method \code{model.matrix.lppm} extracts the model matrix for the GLM. The result is a matrix, with one row for every quadrature point in the fitting procedure, and one column for every canonical covariate in the design matrix. If there are \code{NA} values in the covariates, the argument \code{keepNA} determines whether to retain or delete the corresponding rows of the model matrix. The default \code{keepNA=TRUE} is to retain them. Note that this differs from the default behaviour of many other methods for \code{model.matrix}, which typically delete rows containing \code{NA}. } \value{ A matrix. Columns of the matrix are canonical covariates in the model. Rows of the matrix correspond to quadrature points in the fitting procedure (provided \code{keepNA=TRUE}). } \author{ \spatstatAuthors. } \seealso{ \code{\link[stats]{model.matrix}}, \code{\link{model.images.lppm}}, \code{\link{lppm}} } \examples{ fit <- lppm(spiders ~ x + y) head(model.matrix(fit)) } \keyword{spatial} \keyword{models} spatstat.linnet/man/as.lpp.Rd0000644000176200001440000000625614141460471015657 0ustar liggesusers\name{as.lpp} \Rdversion{1.1} \alias{as.lpp} \title{ Convert Data to a Point Pattern on a Linear Network } \description{ Convert various kinds of data to a point pattern on a linear network. } \usage{ as.lpp(x=NULL, y=NULL, seg=NULL, tp=NULL, \dots, marks=NULL, L=NULL, check=FALSE, sparse) } \arguments{ \item{x,y}{ Vectors of cartesian coordinates, or any data acceptable to \code{\link[grDevices]{xy.coords}}. Alternatively \code{x} can be a point pattern on a linear network (object of class \code{"lpp"}) or a planar point pattern (object of class \code{"ppp"}). } \item{seg,tp}{ Optional local coordinates. Vectors of the same length as \code{x,y}. See Details. } \item{\dots}{Ignored.} \item{marks}{ Optional marks for the point pattern. A vector or factor with one entry for each point, or a data frame or hyperframe with one row for each point. } \item{L}{ Linear network (object of class \code{"linnet"}) on which the points lie. } \item{check}{ Logical. Whether to check the validity of the spatial coordinates. } \item{sparse}{ Optional logical value indicating whether to store the linear network data in a sparse matrix representation or not. See \code{\link{linnet}}. } } \details{ This function converts data in various formats into a point pattern on a linear network (object of class \code{"lpp"}). The possible formats are: \itemize{ \item \code{x} is already a point pattern on a linear network (object of class \code{"lpp"}). Then \code{x} is returned unchanged. \item \code{x} is a planar point pattern (object of class \code{"ppp"}). Then \code{x} is converted to a point pattern on the linear network \code{L} using \code{\link{lpp}}. \item \code{x,y,seg,tp} are vectors of equal length. 
These specify that the \code{i}th point has Cartesian coordinates \code{(x[i],y[i])}, and lies on segment number \code{seg[i]} of the network \code{L}, at a fractional position \code{tp[i]} along that segment (with \code{tp=0} representing one endpoint and \code{tp=1} the other endpoint of the segment). \item \code{x,y} are missing and \code{seg,tp} are vectors of equal length as described above. \item \code{seg,tp} are \code{NULL}, and \code{x,y} are data in a format acceptable to \code{\link[grDevices]{xy.coords}} specifying the Cartesian coordinates. \item Only the arguments \code{x} and \code{L} are given, and \code{x} is a data frame with one of the following types: \itemize{ \item two columns labelled \code{seg,tp} interpreted as local coordinates on the network. \item two columns labelled \code{x,y} interpreted as Cartesian coordinates. \item four columns labelled \code{x,y,seg,tp} interpreted as Cartesian coordinates and local coordinates. } } } \value{ A point pattern on a linear network (object of class \code{"lpp"}). } \seealso{ \code{\link{lpp}}. } \examples{ A <- as.psp(simplenet) X <- runifpointOnLines(10, A) is.ppp(X) Y <- as.lpp(X, L=simplenet) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math} spatstat.linnet/man/as.data.frame.lintess.Rd0000644000176200001440000000427114141460471020541 0ustar liggesusers\name{as.data.frame.lintess} \alias{as.data.frame.lintess} \title{Convert Network Tessellation to Data Frame} \description{ Converts a tessellation on a linear network into a data frame. } \usage{ \method{as.data.frame}{lintess}(x, \dots) } \arguments{ \item{x}{ Tessellation on a linear network (object of class \code{"lintess"}). } \item{\dots}{Further arguments passed to \code{\link[base:as.data.frame]{as.data.frame.default}} to determine the row names and other features. } } \details{ A tessellation on a linear network is a partition of the network into non-overlapping pieces (tiles). Each tile consists of one or more line segments which are subsets of the line segments making up the network. A tile can consist of several disjoint pieces. This function converts the tessellation \code{x} to a data frame. Each row of the data frame specifies one sub-segment of the network, and allocates it to a particular tile. The data frame has the following columns: \itemize{ \item The \code{seg} column specifies which line segment of the network contains the sub-segment. Values of \code{seg} are integer indices for the network segments in \code{as.psp(as.linnet(x))}. \item The \code{t0} and \code{t1} columns specify the start and end points of the sub-segment. They are numeric values between 0 and 1 inclusive, where the values 0 and 1 represent the network vertices that are joined by this network segment. \item The \code{tile} column specifies which tile of the tessellation includes this sub-segment. It is a factor whose levels are the names of the tiles. } The tessellation may have marks, which are attached to the \emph{tiles} of the tessellation. If marks are present, the resulting data frame includes columns containing, for each sub-segment, the mark value of the corresponding tile. } \value{ A data frame with columns named \code{seg}, \code{t0}, \code{t1}, \code{tile}, and possibly other columns. } \author{ \spatstatAuthors.
} \seealso{ \code{\link{lintess}} } \examples{ X <- lineardirichlet(runiflpp(3, simplenet)) marks(X) <- letters[1:3] as.data.frame(X) } \keyword{spatial} \keyword{methods} spatstat.linnet/man/connected.linnet.Rd0000644000176200001440000000323714141460471017710 0ustar liggesusers\name{connected.linnet} \alias{connected.linnet} \title{ Connected Components of a Linear Network } \description{ Find the topologically-connected components of a linear network. } \usage{ \method{connected}{linnet}(X, \dots, what = c("labels", "components")) } \arguments{ \item{X}{ A linear network (object of class \code{"linnet"}). } \item{\dots}{ Ignored. } \item{what}{ Character string specifying the kind of result. } } \details{ The function \code{connected} is generic. This is the method for linear networks (objects of class \code{"linnet"}). Two vertices of the network are connected if they are joined by a path in the network. This function divides the network into subsets, such that all points in a subset are connected to each other. If \code{what="labels"} the return value is a factor with one entry for each vertex of \code{X}, identifying which connected component the vertex belongs to. If \code{what="components"} the return value is a list of linear networks, which are the connected components of \code{X}. } \value{ If \code{what="labels"}, a factor. If \code{what="components"}, a list of linear networks. } \author{ \adrian and Suman Rakshit. } \seealso{ \code{\link{thinNetwork}} } \examples{ # remove some edges from a network to make it disconnected plot(simplenet, col="grey", main="", lty=2) A <- thinNetwork(simplenet, retainedges=-c(3,5)) plot(A, add=TRUE, lwd=2) # find the connected components connected(A) cA <- connected(A, what="components") plot(cA[[1]], add=TRUE, col="green", lwd=2) plot(cA[[2]], add=TRUE, col="blue", lwd=2) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/is.multitype.lpp.Rd0000644000176200001440000000332314141460471017712 0ustar liggesusers\name{is.multitype.lpp} \alias{is.multitype.lpp} \title{Test Whether A Point Pattern on a Network is Multitype} \description{ Tests whether a point pattern on a network has ``marks'' attached to the points which classify the points into several types. } \usage{ \method{is.multitype}{lpp}(X, na.action="warn", \dots) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } \item{na.action}{ String indicating what to do if \code{NA} values are encountered amongst the marks. Options are \code{"warn"}, \code{"fatal"} and \code{"ignore"}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a multitype point pattern. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{chicago}} dataset contains the locations of crimes, each crime location being marked by the type of crime. This function tests whether the point pattern \code{X} contains or involves marked points, \bold{and} that the marks are a factor. It is a method for the generic function \code{\link[spatstat.geom]{is.multitype}}. The argument \code{na.action} determines what action will be taken if the point pattern has a vector of marks but some or all of the marks are \code{NA}. Options are \code{"fatal"} to cause a fatal error; \code{"warn"} to issue a warning and then return \code{TRUE}; and \code{"ignore"} to take no action except returning \code{TRUE}.
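For example, the following minimal sketch (the mark values are arbitrary) illustrates the distinction between marked and multitype patterns:
\preformatted{
  X <- runiflpp(10, simplenet)
  marks(X) <- factor(sample(c("a", "b"), 10, replace=TRUE))
  is.multitype(X)   # TRUE: the marks are a factor
  marks(X) <- runif(10)
  is.multitype(X)   # FALSE: numeric marks do not classify points into types
}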
} \seealso{ \code{\link[spatstat.geom]{is.multitype}}, \code{\link{is.multitype.lppm}} } \examples{ is.multitype(chicago) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} spatstat.linnet/man/predict.lppm.Rd0000644000176200001440000000651414141460471017060 0ustar liggesusers\name{predict.lppm} \alias{predict.lppm} \title{ Predict Point Process Model on Linear Network } \description{ Given a fitted point process model on a linear network, compute the fitted intensity or conditional intensity of the model. } \usage{ \method{predict}{lppm}(object, ..., type = "trend", locations = NULL, new.coef=NULL) } \arguments{ \item{object}{ The fitted model. An object of class \code{"lppm"}, see \code{\link{lppm}}. } \item{type}{ Type of values to be computed. Either \code{"trend"}, \code{"cif"} or \code{"se"}. } \item{locations}{ Optional. Locations at which predictions should be computed. Either a data frame with two columns of coordinates, or a binary image mask. } \item{new.coef}{ Optional. Numeric vector of model coefficients, to be used instead of the fitted coefficients \code{coef(object)} when calculating the prediction. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to determine the pixel resolution (if \code{locations} is missing). } } \details{ This function computes the fitted point process intensity, fitted conditional intensity, or standard error of the fitted intensity, for a point process model on a linear network. It is a method for the generic \code{\link[stats]{predict}} for the class \code{"lppm"}. The argument \code{object} should be an object of class \code{"lppm"} (produced by \code{\link{lppm}}) representing a point process model on a linear network. Predicted values are computed at the locations given by the argument \code{locations}. If this argument is missing, then predicted values are computed at a fine grid of points on the linear network. \itemize{ \item If \code{locations} is missing or \code{NULL} (the default), the return value is a pixel image (object of class \code{"linim"} which inherits class \code{"im"}) corresponding to a discretisation of the linear network, with numeric pixel values giving the predicted values at each location on the linear network. \item If \code{locations} is a data frame, the result is a numeric vector of predicted values at the locations specified by the data frame. \item If \code{locations} is a binary mask, the result is a pixel image with predicted values computed at the pixels of the mask. } } \value{ A pixel image (object of class \code{"linim"} which inherits class \code{"im"}) or a numeric vector, depending on the argument \code{locations}. See Details. } \author{ \adrian } \seealso{ \code{\link{lpp}}, \code{\link{linim}} } \examples{ X <- runiflpp(12, simplenet) fit <- lppm(X ~ x) v <- predict(fit, type="trend") plot(v) } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. McSwiggan, G., Nair, M.G. and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation.
} \keyword{spatial} \keyword{models} spatstat.linnet/man/chop.linnet.Rd0000644000176200001440000000203714141460471016674 0ustar liggesusers\name{chop.linnet} \alias{chop.linnet} \title{ Divide a Linear Network into Tiles Using Infinite Lines } \description{ Given a linear network and a set of infinite lines, divide the network into tiles demarcated by the lines. The result is a tessellation of the network. } \usage{ chop.linnet(X, L) } \arguments{ \item{X}{ Linear network (object of class \code{"linnet"}) or data acceptable to \code{\link{as.linnet}}. } \item{L}{ Infinite line or lines (object of class \code{"infline"}). } } \details{ The first line of \code{L} divides \code{X} into two tiles. Subsequent lines divide each of these tiles. The result is a tessellation of \code{X}. Tiles are not necessarily connected sets. } \value{ Tessellation on a linear network (object of class \code{"lintess"}). } \author{ \adrian. } \seealso{ \code{\link{crossing.linnet}} } \examples{ L <- infline(p=runif(3), theta=runif(3, max=pi/2)) Y <- chop.linnet(simplenet, L) plot(Y, main="") plot(L, col="red") } \keyword{spatial} \keyword{manip} spatstat.linnet/man/linearKinhom.Rd0000644000176200001440000001326214141460471017075 0ustar liggesusers\name{linearKinhom} \alias{linearKinhom} \title{ Inhomogeneous Linear K Function } \description{ Computes an estimate of the inhomogeneous linear \eqn{K} function for a point pattern on a linear network. } \usage{ linearKinhom(X, lambda=NULL, r=NULL, ..., correction="Ang", normalise=TRUE, normpower=1, update=TRUE, leaveoneout=TRUE, ratio=FALSE) } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{lambda}{ Intensity values for the point pattern. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{ Optional. Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Ignored. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the data points, raised to \code{normpower}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. } \item{update}{ Logical value indicating what to do when \code{lambda} is a fitted model (class \code{"lppm"} or \code{"ppm"}). If \code{update=TRUE} (the default), the model will first be refitted to the data \code{X} (using \code{\link{update.lppm}} or \code{\link{update.ppm}}) before the fitted intensity is computed. If \code{update=FALSE}, the fitted intensity of the model will be computed without re-fitting it to \code{X}. } \item{leaveoneout}{ Logical value (passed to \code{\link{fitted.lppm}} or \code{\link{fitted.ppm}}) specifying whether to use a leave-one-out rule when calculating the intensity, when \code{lambda} is a fitted model. Supported only when \code{update=TRUE}. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns. } } \details{ This command computes the inhomogeneous version of the linear \eqn{K} function from point pattern data on a linear network. 
If \code{lambda = NULL} the result is equivalent to the homogeneous \eqn{K} function \code{\link{linearK}}. If \code{lambda} is given, then it is expected to provide estimated values of the intensity of the point process at each point of \code{X}. The argument \code{lambda} may be a numeric vector (of length equal to the number of points in \code{X}), or a \code{function(x,y)} that will be evaluated at the points of \code{X} to yield numeric values, or a pixel image (object of class \code{"im"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). If \code{lambda} is a fitted point process model, the default behaviour is to update the model by re-fitting it to the data, before computing the fitted intensity. This can be disabled by setting \code{update=FALSE}. If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010). Each estimate is initially computed as \deqn{ \widehat K_{\rm inhom}(r) = \frac{1}{\mbox{length}(L)} \sum_i \sum_j \frac{1\{d_{ij} \le r\} e(x_i,x_j)}{\lambda(x_i)\lambda(x_j)} }{ K^inhom(r)= (1/length(L)) sum[i] sum[j] 1(d[i,j] <= r) * e(x[i],x[j])/(lambda(x[i]) * lambda(x[j])) } where \code{L} is the linear network, \eqn{d_{ij}}{d[i,j]} is the distance between points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}, and \eqn{e(x_i,x_j)}{e(x[i],x[j])} is a weight. If \code{correction="none"} then this weight is equal to 1, while if \code{correction="Ang"} the weight is \eqn{e(x_i,x_j,r) = 1/m(x_i, d_{ij})}{e(x[i],x[j],r) = 1/m(x[i],d[i,j])} where \eqn{m(u,t)} is the number of locations on the network that lie exactly \eqn{t} units distant from location \eqn{u} by the shortest path. If \code{normalise=TRUE} (the default), then the estimates described above are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where \eqn{ c = \mbox{length}(L)/\sum (1/\lambda(x_i)). }{ c = length(L)/sum[i] (1/lambda(x[i])). } This rescaling reduces the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. The default value of \code{normpower} is 1 (for consistency with previous versions of \pkg{spatstat}) but the most sensible value is 2, which would correspond to rescaling the \code{lambda} values so that \eqn{ \sum (1/\lambda(x_i)) = \mbox{length}(L). }{ sum[i] (1/lambda(x[i])) = length(L). } } \value{ Function value table (object of class \code{"fv"}). } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \references{ Ang, Q.W. (2010) Statistical methodology for spatial point patterns on a linear network. MSc thesis, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. } \seealso{ \code{\link{lpp}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) fit <- lppm(X ~x) K <- linearKinhom(X, lambda=fit) plot(K) } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/plot.linnet.Rd0000644000176200001440000000232414141460471016720 0ustar liggesusers\name{plot.linnet} \alias{plot.linnet} \title{ Plot a linear network } \description{ Plots a linear network } \usage{ \method{plot}{linnet}(x, ..., main=NULL, add=FALSE, vertices=FALSE, window=FALSE, do.plot=TRUE) } \arguments{ \item{x}{ Linear network (object of class \code{"linnet"}). 
} \item{\dots}{ Arguments passed to \code{\link{plot.psp}} controlling the plot. } \item{main}{ Main title for plot. Use \code{main=""} to suppress it. } \item{add}{ Logical. If \code{TRUE}, superimpose the graphics over the current plot. If \code{FALSE}, generate a new plot. } \item{vertices}{ Logical. Whether to plot the vertices as well. } \item{window}{ Logical. Whether to plot the window containing the linear network. } \item{do.plot}{ Logical. Whether to actually perform the plot. } } \details{ This is the plot method for class \code{"linnet"}. } \value{ An (invisible) object of class \code{"owin"} giving the bounding box of the network. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link{linnet}} } \examples{ plot(simplenet) } \keyword{spatial} spatstat.linnet/man/lineardisc.Rd0000644000176200001440000000764114141460471016576 0ustar liggesusers\name{lineardisc} \alias{lineardisc} \alias{countends} \title{ Compute Disc of Given Radius in Linear Network } \description{ Computes the \sQuote{disc} of given radius and centre in a linear network. } \usage{ lineardisc(L, x = locator(1), r, plotit = TRUE, cols=c("blue", "red","green"), add=TRUE) countends(L, x = locator(1), r, toler=NULL, internal=list()) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). } \item{x}{ Location of centre of disc. Either a point pattern (object of class \code{"ppp"}) containing exactly 1 point, or a numeric vector of length 2. } \item{r}{ Radius of disc. } \item{plotit}{ Logical. Whether to plot the disc. } \item{add}{ Logical. If \code{add=TRUE} (the default), the disc will be plotted on the current plot frame. If \code{add=FALSE}, a new plot frame will be started, the entire network will be displayed, and then the disc will be plotted over this. } \item{cols}{ Colours for plotting the disc. A numeric or character vector of length 3 specifying the colours of the disc centre, disc lines and disc endpoints respectively. } \item{toler}{ Optional. Distance threshold for \code{countends}. See Details. There is a sensible default. } \item{internal}{Argument for internal use by the package.} } \details{ The \sQuote{disc} \eqn{B(x,r)} of centre \eqn{x} and radius \eqn{r} in a linear network \eqn{L} is the set of all points \eqn{u} in \eqn{L} such that the shortest path distance from \eqn{x} to \eqn{u} is less than or equal to \eqn{r}. This is a union of line segments contained in \eqn{L}. The \emph{relative boundary} of the disc \eqn{B(x,r)} is the set of points \eqn{v} such that the shortest path distance from \eqn{x} to \eqn{v} is \emph{equal} to \eqn{r}. The function \code{lineardisc} computes the disc of radius \eqn{r} and its relative boundary, optionally plots them, and returns them. The faster function \code{countends} simply counts the number of points in the relative boundary. Note that \code{countends} requires the linear network \code{L} to be given in the non-sparse matrix format (see the argument \code{sparse} in \code{\link{linnet}} or \code{\link{as.linnet}}) while \code{lineardisc} accepts both sparse and non-sparse formats. The optional threshold \code{toler} is used to suppress numerical errors in \code{countends}. If the distance from \eqn{x} to a network vertex \eqn{v} is between \code{r-toler} and \code{r+toler}, the vertex will be treated as lying on the relative boundary. 
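As a minimal sketch (using the \code{simplenet} dataset supplied with \pkg{spatstat.data}; the centre and radius below are arbitrary), the network can be converted to the non-sparse representation before calling \code{countends}:
\preformatted{
  Lns    <- as.linnet(simplenet, sparse=FALSE) # non-sparse representation, needed by countends
  centre <- as.ppp(runiflpp(1, Lns))           # an arbitrary location lying on the network
  lineardisc(Lns, centre, r=0.3, plotit=FALSE)
  countends(Lns, centre, r=0.3)
}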
} \value{ The value of \code{lineardisc} is a list with two entries: \item{lines }{Line segment pattern (object of class \code{"psp"}) representing the interior disc} \item{endpoints}{Point pattern (object of class \code{"ppp"}) representing the relative boundary of the disc. } The value of \code{countends} is an integer giving the number of points in the relative boundary. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and \adrian } \seealso{ \code{\link{linnet}} } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. \emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. } \examples{ # letter 'A' v <- ppp(x=(-2):2, y=3*c(0,1,2,1,0), c(-3,3), c(-1,7)) edg <- cbind(1:4, 2:5) edg <- rbind(edg, c(2,4)) letterA <- linnet(v, edges=edg) plot(letterA) lineardisc(letterA, c(0,3), 1.6) # count the endpoints countends(letterA, c(0,3), 1.6) # cross-check (slower) en <- lineardisc(letterA, c(0,3), 1.6, plotit=FALSE)$endpoints npoints(en) } \keyword{spatial} spatstat.linnet/man/joinVertices.Rd0000644000176200001440000000374714141460471017130 0ustar liggesusers\name{joinVertices} \alias{joinVertices} \title{ Join Vertices in a Network } \description{ Join the specified vertices in a linear network, creating a new network. } \usage{ joinVertices(L, from, to, marks=NULL) } \arguments{ \item{L}{ A linear network (object of class \code{"linnet"}) or point pattern on a linear network (object of class \code{"lpp"}). } \item{from,to}{ Integers, or integer vectors of equal length, specifying the vertices which should be joined. Alternatively \code{from} can be a 2-column matrix of integers and \code{to} is missing or \code{NULL}. } \item{marks}{ Optional vector or data frame of values associated with the new edges. } } \details{ Vertices of the network are numbered by their order of appearance in the point pattern \code{vertices(L)}. If \code{from} and \code{to} are single integers, then the pair of vertices numbered \code{from} and \code{to} will be joined to make a new segment of the network. If \code{from} and \code{to} are vectors of integers, then vertex \code{from[i]} will be joined to vertex \code{to[i]} for each \code{i = 1,2,..}. If \code{L} is a network (class \code{"linnet"}), the result is another network, created by adding new segments. If \code{L} is a point pattern on a network (class \code{"lpp"}), the result is another point pattern object, created by adding new segments to the underlying network, and retaining the points. In the resulting object, the new line segments are appended to the existing list of line segments. } \value{ A linear network (object of class \code{"linnet"}) or point pattern on a linear network (object of class \code{"lpp"}). } \author{ \spatstatAuthors. 
} \seealso{ \code{\link{linnet}}, \code{\link{methods.linnet}}, \code{\link{thinNetwork}} } \examples{ snet <- joinVertices(simplenet, 4, 8) plot(solist(simplenet, snet), main="") X <- runiflpp(3, simplenet) Y <- joinVertices(X, 4, 8) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/nncross.lpp.Rd0000644000176200001440000001042414141460471016731 0ustar liggesusers\name{nncross.lpp} \alias{nncross.lpp} \title{Nearest Neighbours on a Linear Network} \description{ Given two point patterns \code{X} and \code{Y} on a linear network, finds the nearest neighbour in \code{Y} of each point of \code{X} using the shortest path in the network. } \usage{ \method{nncross}{lpp}(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), \dots, k = 1, method="C") } \arguments{ \item{X,Y}{ Point patterns on a linear network (objects of class \code{"lpp"}). They must lie on the \emph{same} linear network. } \item{iX, iY}{ Optional identifiers, used to determine whether a point in \code{X} is identical to a point in \code{Y}. See Details. } \item{what}{ Character string specifying what information should be returned. Either the nearest neighbour distance (\code{"dist"}), the identifier of the nearest neighbour (\code{"which"}), or both. } \item{\dots}{Ignored.} \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour, for each value of \code{k}. } \item{method}{ Internal use only. } } \details{ Given two point patterns \code{X} and \code{Y} on the same linear network, this function finds, for each point of \code{X}, the nearest point of \code{Y}, measuring distance by the shortest path in the network. The distance between these points is also computed. The return value is a data frame, with rows corresponding to the points of \code{X}. The first column gives the nearest neighbour distances (i.e. the \code{i}th entry is the distance from the \code{i}th point of \code{X} to the nearest element of \code{Y}). The second column gives the indices of the nearest neighbours (i.e.\ the \code{i}th entry is the index of the nearest element in \code{Y}.) If \code{what="dist"} then only the vector of distances is returned. If \code{what="which"} then only the vector of indices is returned. Note that this function is not symmetric in \code{X} and \code{Y}. To find the nearest neighbour in \code{X} of each point in \code{Y}, use \code{nncross(Y,X)}. The arguments \code{iX} and \code{iY} are used when the two point patterns \code{X} and \code{Y} have some points in common. In this situation \code{nncross(X, Y)} would return some zero distances. To avoid this, attach a unique integer identifier to each point, such that two points are identical if their identifying numbers are equal. Let \code{iX} be the vector of identifier values for the points in \code{X}, and \code{iY} the vector of identifiers for points in \code{Y}. Then the code will only compare two points if they have different values of the identifier. See the Examples. The \code{k}th nearest neighbour may be undefined, for example if there are fewer than \code{k+1} points in the dataset, or if the linear network is not connected. In this case, the \code{k}th nearest neighbour distance is infinite. } \value{ By default (if \code{what=c("dist", "which")} and \code{k=1}) a data frame with two columns: \item{dist}{Nearest neighbour distance} \item{which}{Nearest neighbour index in \code{Y}} If \code{what="dist"}, a vector of nearest neighbour distances. If \code{what="which"}, a vector of nearest neighbour indices. 
If \code{k} is a vector of integers, the result is a matrix with one row for each point in \code{X}, giving the distances and/or indices of the \code{k}th nearest neighbours in \code{Y}. } \seealso{ \code{\link{nndist.lpp}} for nearest neighbour distances in a single point pattern. \code{\link{nnwhich.lpp}} to identify which points are nearest neighbours in a single point pattern. } \examples{ # two different point patterns X <- runiflpp(3, simplenet) Y <- runiflpp(5, simplenet) nn <- nncross(X,Y) nn plot(simplenet, main="nncross") plot(X, add=TRUE, cols="red") plot(Y, add=TRUE, cols="blue", pch=16) XX <- as.ppp(X) YY <- as.ppp(Y) i <- nn$which arrows(XX$x, XX$y, YY[i]$x, YY[i]$y, length=0.15) # nearest and second-nearest neighbours nncross(X, Y, k=1:2) # two patterns with some points in common X <- Y[1:2] iX <- 1:2 iY <- 1:5 nncross(X,Y, iX, iY) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat.linnet/man/linearKdot.Rd0000644000176200001440000000526114141460471016551 0ustar liggesusers\name{linearKdot} \alias{linearKdot} \title{ Multitype K Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype \eqn{K} function which counts the expected number of points (of any type) within a given distance of a point of type \eqn{i}. } \usage{ linearKdot(X, i, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the dot type \eqn{K} function \eqn{K_{i\bullet}(r)}{K[i.](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{i\bullet}(r)}{K[i.](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{Ignored.} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{Kdot}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{Kdot}}, \code{\link{linearKcross}}, \code{\link{linearK}}. 
} \examples{ data(chicago) K <- linearKdot(chicago, "assault") } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/text.lpp.Rd0000644000176200001440000000262514141460471016234 0ustar liggesusers\name{text.lpp} \alias{text.lpp} \title{ Add Text Labels to Point Pattern on a Network } \description{ Plots a text label at the location of each point, for a point pattern on a linear network. } \usage{ \method{text}{lpp}(x, \dots) } \arguments{ \item{x}{ A point pattern on a linear network (class \code{"lpp"}). } \item{\dots}{ Additional arguments passed to \code{\link[graphics]{text.default}}. } } \details{ This function is a method for the generic \code{\link[graphics]{text}}. A text label is added to the existing plot, at the location of each point in the point pattern \code{x}. Additional arguments \code{\dots} are passed to \code{\link[graphics]{text.default}} and may be used to control the placement of the labels relative to the point locations, and the size and colour of the labels. By default, the labels are the serial numbers 1 to \code{n}, where \code{n} is the number of points in \code{x}. This can be changed by specifying the argument \code{labels}, which should be a vector of length \code{n}. } \value{ Null. } \author{ \spatstatAuthors. } \seealso{ \code{\link[graphics]{text.default}}, \code{\link[spatstat.geom]{text.ppp}} } \examples{ X <- runiflpp(5, simplenet) plot(X) text(X, pos=2, col="blue") } \keyword{spatial} \keyword{hplot} spatstat.linnet/man/data.lppm.Rd0000644000176200001440000000217314141460471016334 0ustar liggesusers\name{data.lppm} \alias{data.lppm} \title{Extract Original Data from a Fitted Point Process Model on a Network} \description{ Given a fitted point process model on a linear network, this function extracts the original point pattern dataset to which the model was fitted. } \usage{ data.lppm(object) } \arguments{ \item{object}{ Fitted point process model on a linear network (an object of class \code{"lppm"}). } } \value{ A point pattern on a linear network (object of class \code{"lpp"}). } \details{ An object of class \code{"lppm"} represents a point process model that has been fitted to a point pattern dataset on a linear network. It is typically produced by the model-fitting algorithm \code{\link{lppm}}. The object contains complete information about the original data point pattern to which the model was fitted. This function extracts the original data pattern. } \seealso{ \code{\link{lppm}}, \code{\link{data.ppm}} } \examples{ fit <- lppm(spiders ~ x) X <- data.lppm(fit) # 'X' is identical to 'spiders' } \author{ \spatstatAuthors. } \keyword{spatial} \keyword{manip} \keyword{models} spatstat.linnet/man/divide.linnet.Rd0000644000176200001440000000214514141460471017207 0ustar liggesusers\name{divide.linnet} \alias{divide.linnet} \title{ Divide Linear Network at Cut Points } \description{ Make a tessellation of a linear network by dividing it into pieces demarcated by the points of a point pattern. } \usage{ divide.linnet(X) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } } \details{ The points \code{X} are interpreted as dividing the linear network \code{L=as.linnet(X)} into separate pieces. Two locations on \code{L} belong to the same piece if and only if they can be joined by a path in \code{L} that does not cross any of the points of \code{X}. 
The result is a tessellation of the network (object of class \code{"lintess"}) representing the division of \code{L} into pieces. } \value{ A tessellation on a linear network (object of class \code{"lintess"}). } \author{ \spatstatAuthors and Greg McSwiggan. } \seealso{ \code{\link{linnet}}, \code{\link{lintess}}. } \examples{ X <- runiflpp(5, simplenet) plot(divide.linnet(X)) plot(X, add=TRUE, pch=16, show.network=FALSE) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/linearpcfdot.Rd0000644000176200001440000000540714141460471017131 0ustar liggesusers\name{linearpcfdot} \alias{linearpcfdot} \title{ Multitype Pair Correlation Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype pair correlation function from points of type \eqn{i} to points of any type. } \usage{ linearpcfdot(X, i, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-any pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{pcfdot}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfcross}}, \code{\link{linearpcf}}. \code{\link{pcfcross}}. 
} \examples{ data(chicago) g <- linearpcfdot(chicago, "assault") } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/densityfun.lpp.Rd0000644000176200001440000000666314141460471017446 0ustar liggesusers\name{densityfun.lpp} \alias{densityfun.lpp} \title{ Kernel Estimate of Intensity on a Linear Network as a Spatial Function } \description{ Computes a kernel estimate of the intensity of a point process on a linear network, and returns the intensity estimate as a function of spatial location. } \usage{ \method{densityfun}{lpp}(X, sigma, \dots, weights=NULL, nsigma=1, verbose=FALSE) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } \item{sigma}{ Bandwidth of kernel (standard deviation of Gaussian kernel), in the same units of length as \code{X}. } \item{\dots}{ Arguments passed to \code{\link{density.lpp}} to control the discretisation. } \item{weights}{ Optional numeric vector of weights associated with the points of \code{X}. } \item{nsigma}{ Integer. The number of different bandwidths for which a result should be returned. If \code{nsigma=1} (the default), the result is a function giving kernel estimate with bandwidth \code{sigma}. If \code{nsigma > 1}, the result is a function with an additional argument \code{k} containing the kernel estimates for the \code{nsigma+1} equally-spaced time steps from \code{0} to \code{sigma^2}. } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ Kernel smoothing is applied to the points of \code{X} using the diffusion algorithm of McSwiggan et al (2016). The result is a function on the linear network (object of class \code{"linfun"}) that can be printed, plotted and evaluated at any location. This is a method for the generic function \code{\link[spatstat.core]{densityfun}} for the class \code{"lpp"} of point patterns on a linear network. } \value{ Function on a linear network (object of class \code{"linfun"}). If \code{nsigma=1} (the default), the result is a function giving kernel estimate with bandwidth \code{sigma}. If \code{nsigma > 1}, the result is a function with an additional argument \code{k}. If \code{k} is specified, the function returns the kernel estimate for bandwidth \code{tau = sigma * sqrt(k/nsigma)}. If \code{k} is not specified, results are returned for all \code{k = 1, 2, ..., nsigma}. The result also has attributes \itemize{ \item \code{attr(result, "dt")} giving the time step \eqn{\Delta t}{Delta t}; \item \code{attr(result, "dx")} giving the spacing \eqn{\Delta x}{Delta x} between sample points in the numerical algorithm; \item \code{attr(result, "sigma")} giving the smoothing bandwidth \eqn{\sigma}{sigma} used (or the successive bandwidths used at each sampled time step, if \code{nsigma > 1}). } } \references{ McSwiggan, G., Baddeley, A. and Nair, G. (2016) Kernel Density Estimation on a Linear Network. \emph{Scandinavian Journal of Statistics} \bold{44}, 324--345. } \author{ Greg McSwiggan, with tweaks by \adrian. } \seealso{ \code{\link{density.lpp}} which returns a pixel image on the linear network. \code{\link{methods.linfun}} for methods applicable to \code{"linfun"} objects. 
} \examples{ X <- unmark(chicago) # single bandwidth g <- densityfun(X, 30) plot(g) Y <- X[1:5] g(Y) # weighted gw <- densityfun(X, 30, weights=runif(npoints(X))) # sequence of bandwidths g10 <- densityfun(X, 30, nsigma=10) g10(Y, k=10) g10(Y) plot(as.linim(g10, k=5)) } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/nnfun.lpp.Rd0000644000176200001440000000570414141460471016375 0ustar liggesusers\name{nnfun.lpp} \Rdversion{1.1} \alias{nnfun.lpp} \title{ Nearest Neighbour Map on Linear Network } \description{ Compute the nearest neighbour function of a point pattern on a linear network. } \usage{ \method{nnfun}{lpp}(X, ..., k=1, value=c("index", "mark")) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{k}{ Integer. The algorithm finds the \code{k}th nearest neighbour in \code{X} from any spatial location. } \item{value}{ String (partially matched) specifying whether to return the index of the neighbour (\code{value="index"}, the default) or the mark value of the neighbour (\code{value="mark"}). } \item{\dots}{ Other arguments are ignored. } } \details{ The (geodesic) \emph{nearest neighbour function} of a point pattern \code{X} on a linear network \code{L} tells us which point of \code{X} is closest to any given location. If \code{X} is a point pattern on a linear network \code{L}, the \emph{nearest neighbour function} of \code{X} is the mathematical function \eqn{f} defined for any location \eqn{s} on the network by \code{f(s) = i}, where \code{X[i]} is the closest point of \code{X} to the location \code{s} measured by the shortest path. In other words the value of \code{f(s)} is the identifier or serial number of the closest point of \code{X}. The command \code{nnfun.lpp} is a method for the generic command \code{\link{nnfun}} for the class \code{"lpp"} of point patterns on a linear network. If \code{X} is a point pattern on a linear network, \code{f <- nnfun(X)} returns a \emph{function} in the \R language, with arguments \code{x,y, \dots}, that represents the nearest neighbour function of \code{X}. Evaluating the function \code{f} in the form \code{v <- f(x,y)}, where \code{x} and \code{y} are any numeric vectors of equal length containing coordinates of spatial locations, yields a vector of identifiers or serial numbers of the data points closest to these spatial locations. More efficiently \code{f} can take the arguments \code{x, y, seg, tp} where \code{seg} and \code{tp} are the local coordinates on the network. The result of \code{f <- nnfun(X)} also belongs to the class \code{"linfun"}. It can be printed and plotted immediately as shown in the Examples. It can be converted to a pixel image using \code{\link{as.linim}}. } \value{ A \code{function} in the \R language, with arguments \code{x,y} and optional arguments \code{seg,tp}. It also belongs to the class \code{"linfun"} which has methods for \code{plot}, \code{print} etc. } \seealso{ \code{\link{linfun}}, \code{\link{methods.linfun}}. To compute the \emph{distance} to the nearest neighbour, see \code{\link{distfun.lpp}}. 
} \examples{ X <- runiflpp(3, simplenet) f <- nnfun(X) f plot(f) plot(nnfun(chicago, value="m")) } \author{ \spatstatAuthors } \keyword{spatial} \keyword{math} spatstat.linnet/man/lppm.Rd0000644000176200001440000000723114141460471015424 0ustar liggesusers\name{lppm} \alias{lppm} \alias{lppm.formula} \alias{lppm.lpp} \title{ Fit Point Process Model to Point Pattern on Linear Network } \description{ Fit a point process model to a point pattern dataset on a linear network } \usage{ lppm(X, ...) \method{lppm}{formula}(X, interaction=NULL, ..., data=NULL) \method{lppm}{lpp}(X, ..., eps=NULL, nd=1000, random=FALSE) } \arguments{ \item{X}{ Either an object of class \code{"lpp"} specifying a point pattern on a linear network, or a \code{formula} specifying the point process model. } \item{\dots}{ Arguments passed to \code{\link{ppm}}. } \item{interaction}{ An object of class \code{"interact"} describing the point process interaction structure, or \code{NULL} indicating that a Poisson process (stationary or nonstationary) should be fitted. } \item{data}{ Optional. The values of spatial covariates (other than the Cartesian coordinates) required by the model. A list whose entries are images, functions, windows, tessellations or single numbers. } \item{eps}{ Optional. Spacing between dummy points along each segment of the network. } \item{nd}{ Optional. Total number of dummy points placed on the network. Ignored if \code{eps} is given. } \item{random}{ Logical value indicating whether the grid of dummy points should be placed at a randomised starting position. } } \details{ This function fits a point process model to data that specify a point pattern on a linear network. It is a counterpart of the model-fitting function \code{\link{ppm}} designed to work with objects of class \code{"lpp"} instead of \code{"ppp"}. The function \code{lppm} is generic, with methods for the classes \code{formula} and \code{lpp}. In \code{lppm.lpp} the first argument \code{X} should be an object of class \code{"lpp"} (created by the command \code{\link{lpp}}) specifying a point pattern on a linear network. In \code{lppm.formula}, the first argument is a \code{formula} in the \R language describing the spatial trend model to be fitted. It has the general form \code{pattern ~ trend} where the left hand side \code{pattern} is usually the name of a point pattern on a linear network (object of class \code{"lpp"}) to which the model should be fitted, or an expression which evaluates to such a point pattern; and the right hand side \code{trend} is an expression specifying the spatial trend of the model. Other arguments \code{...} are passed from \code{lppm.formula} to \code{lppm.lpp} and from \code{lppm.lpp} to \code{\link{ppm}}. } \value{ An object of class \code{"lppm"} representing the fitted model. There are methods for \code{print}, \code{predict}, \code{coef} and similar functions. } \author{ \adrian and Greg McSwiggan. } \seealso{ \code{\link{methods.lppm}}, \code{\link{predict.lppm}}, \code{\link{ppm}}, \code{\link{lpp}}. } \examples{ X <- runiflpp(15, simplenet) lppm(X ~1) lppm(X ~x) marks(X) <- factor(rep(letters[1:3], 5)) lppm(X ~ marks) lppm(X ~ marks * x) } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. 
\emph{Scandinavian Journal of Statistics} \bold{39}, 591--617. McSwiggan, G., Nair, M.G. and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation. } \keyword{spatial} \keyword{models} spatstat.linnet/man/nndist.lpp.Rd0000644000176200001440000000460514141460471016547 0ustar liggesusers\name{nndist.lpp} \alias{nndist.lpp} \title{ Nearest neighbour distances on a linear network } \description{ Given a pattern of points on a linear network, compute the nearest-neighbour distances, measured by the shortest path in the network. } \usage{ \method{nndist}{lpp}(X, ..., k=1, by=NULL, method="C") } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{by}{ Optional. A factor, which separates \code{X} into groups. The algorithm will compute the distance to the nearest point in each group. } \item{method}{ Optional string determining the method of calculation. Either \code{"interpreted"} or \code{"C"}. } \item{\dots}{ Ignored. } } \details{ Given a pattern of points on a linear network, this function computes the nearest neighbour distance for each point (i.e. the distance from each point to the nearest other point), measuring distance by the shortest path in the network. If \code{method="C"} the distances are computed using code in the C language. If \code{method="interpreted"} then the computation is performed using interpreted \R code. The \R code is much slower, but is provided for checking purposes. The \code{k}th nearest neighbour distance is infinite if the \code{k}th nearest neighbour does not exist. This can occur if there are fewer than \code{k+1} points in the dataset, or if the linear network is not connected. If the argument \code{by} is given, it should be a \code{factor}, of length equal to the number of points in \code{X}. This factor effectively partitions \code{X} into subsets, each subset associated with one of the levels of \code{by}. The algorithm will then compute, for each point of \code{X}, the distance to the nearest neighbour \emph{in each subset}. } \value{ A numeric vector, of length equal to the number of points in \code{X}, or a matrix, with one row for each point in \code{X} and one column for each entry of \code{k}. Entries are nonnegative numbers or infinity (\code{Inf}). } \author{ \adrian } \seealso{ \code{\link{lpp}} } \examples{ X <- runiflpp(12, simplenet) nndist(X) nndist(X, k=2) marks(X) <- factor(rep(letters[1:3], 4)) nndist(X, by=marks(X)) } \keyword{spatial} spatstat.linnet/man/berman.test.lpp.Rd0000644000176200001440000001551014141460471017467 0ustar liggesusers\name{berman.test.lpp} \alias{berman.test.lppm} \alias{berman.test.lpp} \title{Berman's Tests for Point Process Model on a Network} \description{ Tests the goodness-of-fit of a Poisson point process model on a linear network, using the approach of Berman (1986). } \usage{ \method{berman.test}{lpp}(X, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) \method{berman.test}{lppm}(model, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) } \arguments{ \item{X}{ A point pattern (object of class \code{"lpp"}). } \item{model}{ A fitted point process model (object of class \code{"lppm"}). } \item{covariate}{ The spatial covariate on which the test will be based. 
An image (object of class \code{"im"} or \code{"linim"}) or a function. } \item{which}{ Character string specifying the choice of test. } \item{alternative}{ Character string specifying the alternative hypothesis. } \item{\dots}{ Additional arguments controlling the pixel resolution (arguments \code{dimyx} and \code{eps} passed to \code{\link{as.mask}}) or other undocumented features. } } \details{ These functions perform a goodness-of-fit test of a Poisson point process model fitted to point pattern data. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using either of two test statistics \eqn{Z_1}{Z[1]} and \eqn{Z_2}{Z[2]} proposed by Berman (1986). The \eqn{Z_1}{Z[1]} test is also known as the Lawson-Waller test. The function \code{\link[spatstat.core]{berman.test}} is generic, with methods for point patterns (\code{"ppp"} or \code{"lpp"}) and point process models (\code{"ppm"} or \code{"lppm"}). See the help file for \code{\link[spatstat.core]{berman.test}} for information on the generic function and the methods for data in two-dimensional space, classes \code{"ppp"} and \code{"ppm"}. This help file describes the methods for data on a linear network, classes \code{"lpp"} and \code{"lppm"}. \itemize{ \item If \code{X} is a point pattern dataset (object of class \code{"ppp"} or \code{"lpp"}), then \code{berman.test(X, ...)} performs a goodness-of-fit test of the uniform Poisson point process (Complete Spatial Randomness, CSR) for this dataset. \item If \code{model} is a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) then \code{berman.test(model, ...)} performs a test of goodness-of-fit for this fitted model. In this case, \code{model} should be a Poisson point process. } The test is performed by comparing the observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same covariate under the model. Thus, you must nominate a spatial covariate for this test. The argument \code{covariate} should be either a \code{function(x,y)} or a pixel image (object of class \code{"im"} or \code{"linim"}) containing the values of a spatial function. If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. Next the values of the \code{covariate} at all locations in the observation window are evaluated. The point process intensity of the fitted model is also evaluated at all locations in the window. \itemize{ \item If \code{which="Z1"}, the test statistic \eqn{Z_1}{Z[1]} is computed as follows. The sum \eqn{S} of the covariate values at all data points is evaluated. The predicted mean \eqn{\mu}{mu} and variance \eqn{\sigma^2}{sigma^2} of \eqn{S} are computed from the values of the covariate at all locations in the window. Then we compute \eqn{Z_1 = (S-\mu)/\sigma}{Z[1] = (S-mu)/sigma}. Closely-related tests were proposed independently by Waller et al (1993) and Lawson (1993) so this test is often termed the Lawson-Waller test in epidemiological literature. 
\item If \code{which="Z2"}, the test statistic \eqn{Z_2}{Z[2]} is computed as follows. The values of the \code{covariate} at all locations in the observation window, weighted by the point process intensity, are compiled into a cumulative distribution function \eqn{F}. The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. The standardised sample mean of these numbers is the statistic \eqn{Z_2}{Z[2]}. } In both cases the null distribution of the test statistic is the standard normal distribution, approximately. The return value is an object of class \code{"htest"} containing the results of the hypothesis test. The print method for this class gives an informative summary of the test outcome. } \value{ An object of class \code{"htest"} (hypothesis test) and also of class \code{"bermantest"}, containing the results of the test. The return value can be plotted (by \code{\link{plot.bermantest}}) or printed to give an informative summary of the test. } \section{Warning}{ The meaning of a one-sided test must be carefully scrutinised: see the printed output. } \author{ \spatstatAuthors. } \seealso{ \code{\link[spatstat.core]{cdf.test}}, \code{\link[spatstat.core]{quadrat.test}}, \code{\link[spatstat.core]{ppm}} \code{\link{lppm}} } \references{ Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. Lawson, A.B. (1993) On the analysis of mortality events around a prespecified fixed point. \emph{Journal of the Royal Statistical Society, Series A} \bold{156} (3) 363--377. Waller, L., Turnbull, B., Clark, L.C. and Nasca, P. (1992) Chronic Disease Surveillance and testing of clustering of disease and exposure: Application to leukaemia incidence and TCE-contaminated dumpsites in upstate New York. \emph{Environmetrics} \bold{3}, 281--300. } \examples{ #' test of complete randomness berman.test(spiders, "x") #' test of fitted model fit <- lppm(spiders ~ x) berman.test(fit, "y", "Z2") } \keyword{htest} \keyword{spatial} spatstat.linnet/man/densityEqualSplit.Rd0000644000176200001440000001053214141460471020135 0ustar liggesusers\name{densityEqualSplit} \alias{densityEqualSplit} \title{ Equal-Split Algorithm for Kernel Density on a Network } \description{ Computes a kernel density estimate on a linear network using the Okabe-Sugihara equal-split algorithms. } \usage{ densityEqualSplit(x, sigma = NULL, \dots, at = c("pixels", "points"), leaveoneout=TRUE, weights = NULL, kernel = "epanechnikov", continuous = TRUE, epsilon = 1e-06, verbose = TRUE, debug = FALSE, savehistory = TRUE) } \arguments{ \item{x}{ Point pattern on a linear network (object of class \code{"lpp"}) to be smoothed. } \item{sigma}{ Smoothing bandwidth (standard deviation of the kernel) in the same units as the spatial coordinates of \code{x}. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} determining the resolution of the result. } \item{at}{ String (partially matched) specifying whether to compute the intensity values at a fine grid of locations on the network (\code{at="pixels"}, the default) or only at the points of \code{x} (\code{at="points"}). } \item{leaveoneout}{ Logical value indicating whether to compute a leave-one-out estimator. Applicable only when \code{at="points"}. 
} \item{weights}{ Optional. Numeric vector of weights associated with the points of \code{x}. Weights may be positive, negative or zero. } \item{kernel}{ Character string specifying the smoothing kernel. See \code{\link{dkernel}} for possible options. } \item{continuous}{ Logical value indicating whether to compute the \dQuote{equal-split continuous} smoother (\code{continuous=TRUE}, the default) or the \dQuote{equal-split discontinuous} smoother (\code{continuous=FALSE}). } \item{epsilon}{ Tolerance value. A tail of the kernel with total mass less than \code{epsilon} may be deleted. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{debug}{ Logical value indicating whether to print debugging information. } \item{savehistory}{ Logical value indicating whether to save the entire history of the algorithm, for the purposes of evaluating performance. } } \details{ Kernel smoothing is applied to the points of \code{x} using a kernel based on path distances in the network. The result is a pixel image on the linear network (class \code{"linim"}) which can be plotted. Smoothing is performed using one of the \dQuote{equal-split} rules described in Okabe and Sugihara (2012). \itemize{ \item If \code{continuous=TRUE} (the default), smoothing is performed using the \dQuote{equal-split continuous} rule described in Section 9.2.3 of Okabe and Sugihara (2012). The resulting function is continuous on the linear network. \item If \code{continuous=FALSE}, smoothing is performed using the \dQuote{equal-split discontinuous} rule described in Section 9.2.2 of Okabe and Sugihara (2012). The resulting function is not continuous. } Computation is performed by path-tracing as described in Okabe and Sugihara (2012). It is advisable to choose a kernel with bounded support such as \code{kernel="epanechnikov"}. With a Gaussian kernel, computation time can be long, and increases exponentially with \code{sigma}. Faster algorithms are available through \code{\link{density.lpp}}. } \section{Infinite bandwidth}{ If \code{sigma=Inf}, the resulting density estimate is constant over all locations, and is equal to the average density of points per unit length. (If the network is not connected, then this rule is applied separately to each connected component of the network). } \value{ If \code{at="pixels"} (the default), a pixel image on the linear network (object of class \code{"linim"}). If \code{at="points"}, a numeric vector with one entry for each point of \code{x}. } \references{ Okabe, A. and Sugihara, K. (2012) \emph{Spatial analysis along networks}. Wiley. } \author{ \adrian and Greg McSwiggan. } \seealso{ \code{\link{density.lpp}} } \examples{ X <- runiflpp(3, simplenet) De <- density(X, 0.2, kernel="epanechnikov", verbose=FALSE) Ded <- density(X, 0.2, kernel="epanechnikov", continuous=FALSE, verbose=FALSE) } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.linnet/man/density.lpp.Rd0000644000176200001440000001266214141460471016731 0ustar liggesusers\name{density.lpp} \alias{density.lpp} \alias{density.splitppx} \title{ Kernel Estimate of Intensity on a Linear Network } \description{ Estimates the intensity of a point process on a linear network by applying kernel smoothing to the point pattern data. 
} \usage{ \method{density}{lpp}(x, sigma=NULL, \dots, weights=NULL, distance=c("path", "euclidean"), continuous=TRUE, kernel="gaussian") \method{density}{splitppx}(x, sigma=NULL, \dots) } \arguments{ \item{x}{ Point pattern on a linear network (object of class \code{"lpp"}) to be smoothed. } \item{sigma}{ Smoothing bandwidth (standard deviation of the kernel). A single numerical value in the same units as the spatial coordinates of \code{x}. } \item{\dots}{ Additional arguments controlling the algorithm and the spatial resolution of the result. These arguments are passed either to \code{\link{densityQuick.lpp}}, \code{\link{densityHeat.lpp}} or \code{\link{densityEqualSplit}} depending on the algorithm chosen. } \item{weights}{ Optional. Numeric vector of weights associated with the points of \code{x}. Weights may be positive, negative or zero. } \item{distance}{ Character string (partially matched) specifying whether to use a kernel based on paths in the network (\code{distance="path"}, the default) or a two-dimensional kernel (\code{distance="euclidean"}). } \item{kernel}{ Character string specifying the smoothing kernel. See \code{\link{dkernel}} for possible options. } \item{continuous}{ Logical value indicating whether to compute the \dQuote{equal-split continuous} smoother (\code{continuous=TRUE}, the default) or the \dQuote{equal-split discontinuous} smoother (\code{continuous=FALSE}). Applies only when \code{distance="path"}. } } \details{ Kernel smoothing is applied to the points of \code{x} using either a kernel based on path distances in the network, or a two-dimensional kernel. The result is a pixel image on the linear network (class \code{"linim"}) which can be plotted. \itemize{ \item If \code{distance="path"} (the default) then the smoothing is performed using a kernel based on path distances in the network, as described in Okabe and Sugihara (2012) and McSwiggan et al (2016). \itemize{ \item If \code{continuous=TRUE} (the default), smoothing is performed using the \dQuote{equal-split continuous} rule described in Section 9.2.3 of Okabe and Sugihara (2012). The resulting function is continuous on the linear network. \item If \code{continuous=FALSE}, smoothing is performed using the \dQuote{equal-split discontinuous} rule described in Section 9.2.2 of Okabe and Sugihara (2012). The resulting function is continuous except at the network vertices. \item In the default case (where \code{distance="path"} and \code{continuous=TRUE} and \code{kernel="gaussian"}), computation is performed rapidly by solving the classical heat equation on the network, as described in McSwiggan et al (2016). The arguments are passed to \code{\link{densityHeat.lpp}} which performs the computation. Computational time is short, but increases quadratically with \code{sigma}. \item In all other cases, computation is performed by path-tracing as described in Okabe and Sugihara (2012); the arguments are passed to \code{\link{densityEqualSplit}} which performs the computation. Computation time can be extremely long, and increases exponentially with \code{sigma}. } \item If \code{distance="euclidean"}, the smoothing is performed using a two-dimensional kernel. The arguments are passed to \code{\link{densityQuick.lpp}} to perform the computation. Computation time is very short. See the help for \code{\link{densityQuick.lpp}} for further details. } There is also a method for split point patterns on a linear network (class \code{"splitppx"}) which will return a list of pixel images. 
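As a minimal sketch (using the \code{simplenet} dataset; the bandwidth value is arbitrary), the path-distance and Euclidean estimates can be computed and compared side by side:
\preformatted{
  X  <- runiflpp(20, simplenet)
  Dp <- density(X, sigma=0.15, verbose=FALSE)          # path-distance (heat equation) kernel
  De <- density(X, sigma=0.15, distance="euclidean")   # two-dimensional kernel
  plot(solist(path=Dp, euclidean=De), main="")
}
Both results are pixel images on the network (objects of class \code{"linim"}).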
} \section{Infinite bandwidth}{ If \code{sigma=Inf}, the resulting density estimate is constant over all locations, and is equal to the average density of points per unit length. (If the network is not connected, then this rule is applied separately to each connected component of the network). } \value{ A pixel image on the linear network (object of class \code{"linim"}), or in some cases, a numeric vector of length equal to \code{npoints(x)}. } \references{ McSwiggan, G., Baddeley, A. and Nair, G. (2016) Kernel density estimation on a linear network. \emph{Scandinavian Journal of Statistics} \bold{44}, 324--345. Okabe, A. and Sugihara, K. (2012) \emph{Spatial analysis along networks}. Wiley. } \author{ \adrian and Greg McSwiggan. } \seealso{ \code{\link{lpp}}, \code{\link{linim}}, \code{\link{densityQuick.lpp}}, \code{\link{densityHeat.lpp}}, \code{\link{densityVoronoi.lpp}} } \examples{ X <- runiflpp(3, simplenet) D <- density(X, 0.2, verbose=FALSE) plot(D, style="w", main="", adjust=2) Dq <- density(X, 0.2, distance="euclidean") plot(Dq, style="w", main="", adjust=2) Dw <- density(X, 0.2, weights=c(1,2,-1), verbose=FALSE) De <- density(X, 0.2, kernel="epanechnikov", verbose=FALSE) Ded <- density(X, 0.2, kernel="epanechnikov", continuous=FALSE, verbose=FALSE) } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.linnet/man/delaunayNetwork.Rd0000644000176200001440000000250414141460471017626 0ustar liggesusers\name{delaunayNetwork} \alias{delaunayNetwork} \alias{dirichletNetwork} \title{ Linear Network of Delaunay Triangulation or Dirichlet Tessellation } \description{ Computes the edges of the Delaunay triangulation or Dirichlet tessellation of a point pattern, and returns the result as a linear network object. } \usage{ delaunayNetwork(X) dirichletNetwork(X, \dots) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{\dots}{Arguments passed to \code{\link{as.linnet.psp}}} } \details{ For \code{delaunayNetwork}, points of \code{X} which are neighbours in the Delaunay triangulation (see \code{\link{delaunay}}) will be joined by a straight line. The result will be returned as a linear network (object of class \code{"linnet"}). For \code{dirichletNetwork}, the Dirichlet tessellation is computed (see \code{\link{dirichlet}}) and the edges of the tiles of the tessellation are extracted. This is converted to a linear network using \code{\link{as.linnet.psp}}. } \value{ Linear network (object of class \code{"linnet"}) or \code{NULL}. } \author{ \adrian \rolf and \ege } \seealso{ \code{\link{delaunay}}, \code{\link{dirichlet}}, \code{\link{delaunayDistance}} } \examples{ LE <- delaunayNetwork(cells) LI <- dirichletNetwork(cells) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/cut.lpp.Rd0000644000176200001440000001001514141460471016033 0ustar liggesusers\name{cut.lpp} \alias{cut.lpp} \title{Classify Points in a Point Pattern on a Network} \description{ For a point pattern on a linear network, classify the points into distinct types according to the numerical marks in the pattern, or according to another variable. } \usage{ \method{cut}{lpp}(x, z=marks(x), ...) } \arguments{ \item{x}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{z}{ Data determining the classification. 
A numeric vector, a factor, a pixel image on a linear network (class \code{"linim"}), a function on a linear network (class \code{"linfun"}), a tessellation on a linear network (class \code{"lintess"}), a string giving the name of a column of marks, or one of the coordinate names \code{"x"}, \code{"y"}, \code{"seg"} or \code{"tp"}. } \item{\dots}{ Arguments passed to \code{\link{cut.default}}. They determine the breakpoints for the mapping from numerical values in \code{z} to factor values in the output. See \code{\link{cut.default}}. } } \value{ A multitype point pattern on the same linear network, that is, a point pattern object (of class \code{"lpp"}) with a \code{marks} vector that is a factor. } \details{ This function has the effect of classifying each point in the point pattern \code{x} into one of several possible types. The classification is based on the dataset \code{z}, which may be either \itemize{ \item a factor (of length equal to the number of points in \code{x}) determining the classification of each point in \code{x}. Levels of the factor determine the classification. \item a numeric vector (of length equal to the number of points in \code{x}). The range of values of \code{z} will be divided into bands (the number of bands is determined by \code{\dots}) and \code{z} will be converted to a factor using \code{\link{cut.default}}. \item a pixel image on a network (object of class \code{"linim"}). The value of \code{z} at each point of \code{x} will be used as the classifying variable. \item a function on a network (object of class \code{"linfun"}, see \code{\link{linfun}}). The value of \code{z} at each point of \code{x} will be used as the classifying variable. \item a tessellation on a network (object of class \code{"lintess"}, see \code{\link{lintess}}). Each point of \code{x} will be classified according to the tile of the tessellation into which it falls. \item a character string, giving the name of one of the columns of \code{marks(x)}, if this is a data frame. \item a character string identifying one of the coordinates: the spatial coordinates \code{"x"}, \code{"y"} or the segment identifier \code{"seg"} or the fractional coordinate along the segment, \code{"tp"}. } The default is to take \code{z} to be the vector of marks in \code{x} (or the first column in the data frame of marks of \code{x}, if it is a data frame). If the marks are numeric, then the range of values of the numerical marks is divided into several intervals, and each interval is associated with a level of a factor. The result is a marked point pattern, on the same linear network, with the same point locations as \code{x}, but with the numeric mark of each point discretised by replacing it by the factor level. This is a convenient way to transform a marked point pattern which has numeric marks into a multitype point pattern, for example to plot it or analyse it. See the examples. To select some points from \code{x}, use the subset operators \code{\link{[.lpp}} or \code{\link{subset.lpp}} instead. } \seealso{ \code{\link{cut}}, \code{\link{lpp}}, \code{\link{lintess}}, \code{\link{linfun}}, \code{\link{linim}} } \examples{ X <- runiflpp(20, simplenet) f <- linfun(function(x,y,seg,tp) { x }, simplenet) plot(cut(X, f, breaks=4)) plot(cut(X, "x", breaks=4)) plot(cut(X, "seg")) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{methods} spatstat.linnet/man/linearpcfcross.Rd0000644000176200001440000000570614141460471017476 0ustar liggesusers\name{linearpcfcross} \alias{linearpcfcross} \title{ Multitype Pair Correlation Function (Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype pair correlation function from points of type \eqn{i} to points of type \eqn{j}. } \usage{ linearpcfcross(X, i, j, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-any pair correlation function \eqn{g_{ij}(r)}{g[ij](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link{pcfcross}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), \bold{63}, 673--694. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfdot}}, \code{\link{linearpcf}}, \code{\link{pcfcross}}. } \examples{ data(chicago) g <- linearpcfcross(chicago, "assault") } \author{\adrian } \keyword{spatial} \keyword{nonparametric} spatstat.linnet/man/bw.voronoi.Rd0000644000176200001440000000424614141460471016561 0ustar liggesusers\name{bw.voronoi} \alias{bw.voronoi} \title{ Cross Validated Bandwidth Selection for Voronoi Estimator of Intensity on a Network } \description{ Uses cross-validation to select a smoothing bandwidth for the Voronoi estimate of point process intensity on a linear network. 
} \usage{ bw.voronoi(X, \dots, probrange = c(0.2, 0.8), nprob = 10, prob = NULL, nrep = 100, verbose = TRUE, warn=TRUE) } \arguments{ \item{X}{ Point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Ignored. } \item{probrange}{ Numeric vector of length 2 giving the range of bandwidths (retention probabilities) to be assessed. } \item{nprob}{ Integer. Number of bandwidths to be assessed. } \item{prob}{ Optional. A numeric vector of bandwidths (retention probabilities) to be assessed. Entries must be probabilities between 0 and 1. Overrides \code{nprob} and \code{probrange}. } \item{nrep}{ Number of simulated realisations to be used for the computation. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the maximum of the cross-validation criterion occurs at one of the ends of the search interval. } } \details{ This function uses likelihood cross-validation to choose the optimal value of the thinning fraction \code{f} (the retention probability) to be used in the smoothed Voronoi estimator of point process intensity \code{\link{densityVoronoi.lpp}}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \references{ Moradi, M., Cronie, 0., Rubak, E., Lachieze-Rey, R., Mateu, J. and Baddeley, A. (2019) Resample-smoothing of Voronoi intensity estimators. \emph{Statistics and Computing}, in press. } \author{ \spatstatAuthors and Mehdi Moradi. } \seealso{ \code{\link{densityVoronoi.lpp}} } \examples{ np <- if(interactive()) 10 else 3 nr <- if(interactive()) 100 else 2 b <- bw.voronoi(spiders, nprob=np, nrep=nr) b plot(b) } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat.linnet/man/mean.linim.Rd0000644000176200001440000000306414141460471016503 0ustar liggesusers\name{mean.linim} \alias{mean.linim} \alias{median.linim} \alias{quantile.linim} \title{Mean, Median, Quantiles of Pixel Values on a Linear Network} \description{ Calculates the mean, median, or quantiles of the pixel values in a pixel image on a linear network. } \usage{ \method{mean}{linim}(x, \dots) \method{median}{linim}(x, \dots) \method{quantile}{linim}(x, probs=seq(0,1,0.25), \dots) } \arguments{ \item{x}{ A pixel image on a linear network (object of class \code{"linim"}). } \item{probs}{ Vector of probabilities for which quantiles should be calculated. } \item{\dots}{Arguments passed to other methods.} } \details{ These functions calculate the mean, median and quantiles of the pixel values in the image \code{x} on a linear network. An object of class \code{"linim"} describes a pixel image on a linear network. See \code{\link{linim}}. The functions described here are methods for the generic \code{\link{mean}}, \code{\link[stats]{median}} and \code{\link[stats]{quantile}} for the class \code{"linim"}. } \value{ For \code{mean} and \code{median}, a single number. For \code{quantile}, a numeric vector of the same length as \code{probs}. } \seealso{ \code{\link{mean}}, \code{\link[stats]{median}}, \code{\link[stats]{quantile}}, \code{\link{mean.im}}. } \examples{ M <- as.mask.psp(as.psp(simplenet)) Z <- as.im(function(x,y) {x-y}, W=M) X <- linim(simplenet, Z) X mean(X) median(X) quantile(X) } \author{ \spatstatAuthors. 
} \keyword{spatial} \keyword{methods} \keyword{univar} spatstat.linnet/man/insertVertices.Rd0000644000176200001440000000470414141460471017467 0ustar liggesusers\name{insertVertices} \alias{insertVertices} \title{ Insert New Vertices in a Linear Network } \description{ Adds new vertices to a linear network at specified locations along the network. } \usage{ insertVertices(L, \dots) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}) or point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Additional arguments passed to \code{\link{as.lpp}} specifying the positions of the new vertices along the network. } } \details{ This function adds new vertices at locations along an existing linear network. The argument \code{L} can be either a linear network (class \code{"linnet"}) or some other object that includes a linear network. The new vertex locations can be specified either as a point pattern (class \code{"lpp"} or \code{"ppp"}) or using coordinate vectors \code{x,y} or \code{seg,tp} or \code{x,y,seg,tp} as explained in the help for \code{\link{as.lpp}}. This function breaks the existing line segments of \code{L} into pieces at the locations specified by the coordinates \code{seg,tp} and creates new vertices at these locations. The result is the modified object, with an attribute \code{"id"} such that the \code{i}th added vertex has become the \code{id[i]}th vertex of the new network. } \value{ An object of the same class as \code{L} representing the result of adding the new vertices. The result also has an attribute \code{"id"} as described in Details. } \author{ Adrian Baddeley } \seealso{ \code{\link{addVertices}} to create new vertices at locations which are not yet on the network. \code{\link{as.lpp}}, \code{\link{linnet}}, \code{\link{methods.linnet}}, \code{\link{joinVertices}}, \code{\link{thinNetwork}}. } \examples{ opa <- par(mfrow=c(1,3), mar=rep(0,4)) simplenet plot(simplenet, main="") plot(vertices(simplenet), add=TRUE) # add two new vertices at specified local coordinates L <- insertVertices(simplenet, seg=c(3,7), tp=c(0.2, 0.5)) L plot(L, main="") plot(vertices(L), add=TRUE) id <- attr(L, "id") id plot(vertices(L)[id], add=TRUE, pch=16) # add new vertices at three randomly-generated points X <- runiflpp(3, simplenet) LL <- insertVertices(simplenet, X) plot(LL, main="") plot(vertices(LL), add=TRUE) ii <- attr(LL, "id") plot(vertices(LL)[ii], add=TRUE, pch=16) par(opa) } \keyword{spatial} \keyword{manip} spatstat.linnet/man/is.multitype.lppm.Rd0000644000176200001440000000376314141460471020077 0ustar liggesusers\name{is.multitype.lppm} \alias{is.multitype.lppm} \title{Test Whether A Point Process Model is Multitype} \description{ Tests whether a fitted point process model on a network involves ``marks'' attached to the points that classify the points into several types. } \usage{ \method{is.multitype}{lppm}(X, \dots) } \arguments{ \item{X}{ Fitted point process model on a linear network (object of class \code{"lppm"}) usually obtained from \code{\link{lppm}}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a model that was fitted to a multitype point pattern dataset. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{chicago}} dataset contains the locations of crimes, each crime location being marked by the type of crime. 
The argument \code{X} is a fitted point process model on a network (an object of class \code{"lppm"}) typically obtained by fitting a model to point pattern data on a network using \code{\link{lppm}}. This function returns \code{TRUE} if the \emph{original data} (to which the model \code{X} was fitted) were a multitype point pattern. Note that this is not the same as testing whether the model involves terms that depend on the marks (i.e. whether the fitted model actually makes use of the marks in the data). See the Examples for a trick for doing this. If this function returns \code{TRUE}, the implications are (for example) that any simulation of this model will require simulation of random marks as well as random point locations. } \seealso{ \code{\link[spatstat.geom]{is.multitype}}, \code{\link{is.multitype.lpp}} } \examples{ fit <- lppm(chicago ~ x) is.multitype(fit) # TRUE because chicago data are multitype ## To check whether the model involves marks: "marks" \%in\% spatstat.utils::variablesinformula(formula(fit)) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat.linnet/man/is.marked.lppm.Rd0000644000176200001440000000357414141460471017306 0ustar liggesusers\name{is.marked.lppm} \alias{is.marked.lppm} \title{Test Whether A Point Process Model is Marked} \description{ Tests whether a fitted point process model on a network involves ``marks'' attached to the points. } \usage{ \method{is.marked}{lppm}(X, \dots) } \arguments{ \item{X}{ Fitted point process model on a linear network (object of class \code{"lppm"}) usually obtained from \code{\link{lppm}}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a model that was fitted to a marked point pattern dataset. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link[spatstat.data]{chicago}} dataset contains the locations of crimes, each crime location being marked by the type of crime. The argument \code{X} is a fitted point process model on a network (an object of class \code{"lppm"}) typically obtained by fitting a model to point pattern data using \code{\link{lppm}}. This function returns \code{TRUE} if the \emph{original data} (to which the model \code{X} was fitted) were a marked point pattern. Note that this is not the same as testing whether the model involves terms that depend on the marks (i.e. whether the fitted model actually makes use of the marks in the data). See the Examples for a trick to do this. If this function returns \code{TRUE}, the implications are (for example) that any simulation of this model will require simulation of random marks as well as random point locations. } \seealso{ \code{\link[spatstat.geom]{is.marked}}. } \examples{ fit <- lppm(chicago ~ x) is.marked(fit) ## result is TRUE, i.e. 
the data are marked ## To check whether the model involves marks: "marks" \%in\% spatstat.utils::variablesinformula(formula(fit)) } \author{ \adrian and \rolf } \keyword{spatial} \keyword{manip} \keyword{models} spatstat.linnet/DESCRIPTION0000644000176200001440000000656614155176075015143 0ustar liggesusersPackage: spatstat.linnet Version: 2.3-1 Date: 2021-12-11 Title: Linear Networks Functionality of the 'spatstat' Family Authors@R: c(person("Adrian", "Baddeley", role = c("aut", "cre"), email = "Adrian.Baddeley@curtin.edu.au"), person("Rolf", "Turner", role = "aut", email="r.turner@auckland.ac.nz"), person("Ege", "Rubak", role = "aut", email = "rubak@math.aau.dk"), person("Ottmar", "Cronie", role = "ctb"), person("Tilman", "Davies", role = "ctb"), person("Greg", "McSwiggan", role = "ctb"), person("Suman", "Rakshit", role = "ctb")) Maintainer: Adrian Baddeley Depends: R (>= 3.5.0), spatstat.data (>= 2.1-0), spatstat.geom (>= 2.3-0), spatstat.core (>= 2.3-0), stats, graphics, grDevices, methods, utils Imports: spatstat.utils (>= 2.2-0), Matrix, spatstat.sparse (>= 2.0) Suggests: goftest, locfit, spatstat (>= 2.0-0) Description: Defines types of spatial data on a linear network and provides functionality for geometrical operations, data analysis and modelling of data on a linear network, in the 'spatstat' family of packages. Contains definitions and support for linear networks, including creation of networks, geometrical measurements, topological connectivity, geometrical operations such as inserting and deleting vertices, intersecting a network with another object, and interactive editing of networks. Data types defined on a network include point patterns, pixel images, functions, and tessellations. Exploratory methods include kernel estimation of intensity on a network, K-functions and pair correlation functions on a network, simulation envelopes, nearest neighbour distance and empty space distance, relative risk estimation with cross-validated bandwidth selection. Formal hypothesis tests of random pattern (chi-squared, Kolmogorov-Smirnov, Monte Carlo, Diggle-Cressie-Loosmore-Ford, Dao-Genton, two-stage Monte Carlo) and tests for covariate effects (Cox-Berman-Waller-Lawson, Kolmogorov-Smirnov, ANOVA) are also supported. Parametric models can be fitted to point pattern data using the function lppm() similar to glm(). Only Poisson models are implemented so far. Models may involve dependence on covariates and dependence on marks. Models are fitted by maximum likelihood. Fitted point process models can be simulated, automatically. Formal hypothesis tests of a fitted model are supported (likelihood ratio test, analysis of deviance, Monte Carlo tests) along with basic tools for model selection (stepwise(), AIC()) and variable selection (sdr). Tools for validating the fitted model include simulation envelopes, residuals, residual plots and Q-Q plots, leverage and influence diagnostics, partial residuals, and added variable plots. Random point patterns on a network can be generated using a variety of models. 
License: GPL (>= 2) URL: http://spatstat.org/ NeedsCompilation: yes ByteCompile: true BugReports: https://github.com/spatstat/spatstat.linnet/issues Packaged: 2021-12-11 10:00:05 UTC; adrian Author: Adrian Baddeley [aut, cre], Rolf Turner [aut], Ege Rubak [aut], Ottmar Cronie [ctb], Tilman Davies [ctb], Greg McSwiggan [ctb], Suman Rakshit [ctb] Repository: CRAN Date/Publication: 2021-12-11 19:30:05 UTC spatstat.linnet/tests/0000755000176200001440000000000014141460471014551 5ustar liggesusersspatstat.linnet/tests/testsL.R0000644000176200001440000006011214141460471016152 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.linnet #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.linnet) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) # # tests/lppstuff.R # # Tests for lpp code # # $Revision: 1.70 $ $Date: 2021/07/01 02:16:16 $ local({ if(ALWAYS) { #' make test data Xsimple <- runiflpp(6, simplenet) Xchic <- chicago[c(TRUE, FALSE)] Xspid <- spiders[c(TRUE, FALSE)] Xdend <- dendrite[seq_len(npoints(dendrite)) %% 8 == 0] } if(FULLTEST) { #' lpp class support Xone <- Xsimple %mark% runif(6) Xtwo <- Xsimple %mark% data.frame(a=1:6, b=runif(6)) print(summary(Xone)) print(summary(Xtwo)) plot(Xsimple, show.window=TRUE) plot(Xone) plot(Xtwo, do.several=FALSE) #' geometry etc rotate(Xsimple, pi/3, centre=c(0.2,0.3)) superimpose.lpp(L=simplenet) W <- Window(Xsimple) #' cut.lpp tes <- lineardirichlet(Xsimple[1:4]) f <- as.linfun(tes) Z <- as.linim(f) cut(Xsimple, tes) cut(Xsimple, f) cut(Xsimple, Z) #' check 'normalise' option in linearKinhom fit <- lppm(Xsimple ~x) K <- linearKinhom(Xsimple, lambda=fit, normalise=FALSE) plot(K) g <- linearpcfinhom(Xsimple, lambda=fit, normalise=FALSE) plot(g) K <- linearKinhom(Xsimple, lambda=fit, normalise=TRUE) plot(K) g <- linearpcfinhom(Xsimple, lambda=fit, normalise=TRUE) plot(g) ## other code blocks K <- linearKinhom(Xsimple, lambda=fit, correction="none", ratio=TRUE) g <- linearpcf(Xsimple, correction="none", ratio=TRUE) g1 <- linearpcf(Xsimple[1], ratio=TRUE) K1 <- linearKcross(dendrite[1], "thin", "thin", ratio=TRUE) ## check empty patterns OK X0 <- runiflpp(0, simplenet) print(X0) g <- linearpcf(X0, ratio=TRUE) } ## nearest neighbour distances eps <- sqrt(.Machine$double.eps) f <- function(mat,k) { apply(mat, 1, function(z,n) { sort(z)[n] }, n=k+1) } g <- function(mat,k) { apply(mat, 1, function(z,n) { order(z)[n] }, n=k+1) } if(ALWAYS) { nn <- nndist(Xspid) nnP <- f(pairdist(Xspid), 1) if(any(abs(nn - nnP) > eps)) stop("nndist.lpp does not agree with pairdist.lpp") nw <- nnwhich(Xspid) nwP <- g(pairdist(Xspid), 1) if(any(nw != nwP)) stop("nnwhich.lpp does not agree with pairdist") } if(FULLTEST) { #' code blocks in nndist.lpp/nnwhich.lpp #' non-sparse network, interpreted code Ad <- nndist(Xspid, method="interpreted") Aw <- nnwhich(Xspid, method="interpreted") #' sparse network, older C code opa <- spatstat.options(Cnndistlpp=FALSE) Bd <- nndist(dendrite) Bw <- nnwhich(dendrite) spatstat.options(opa) #' undefined nearest neighbours Ed <- nndist(Xspid[1:3], k=1:3) Ew <- nnwhich(Xspid[1:3], k=1:3) #' trivial cases in nncross.lpp a <- nncross(runiflpp(0, simplenet), runiflpp(1, simplenet), what="which", format="list")$which a <- nncross(runiflpp(0, simplenet), runiflpp(1, simplenet), what="dist", 
format="list")$dist } if(ALWAYS) { #' compare algorithms ZZ <- split(Xchic) XX <- ZZ$damage YY <- ZZ$assault op <- spatstat.options(Cnncrosslpp=FALSE) a <- nncross(XX, YY) spatstat.options(Cnncrosslpp=TRUE) b <- nncross(XX, YY) if(any(a$which != b$which)) stop("Inconsistent values of nncross.lpp()$which from different C code") if(max(abs(a$dist - b$dist)) > eps) stop("Inconsistent values of nncross.lpp()$dist from different C code") spatstat.options(Cnncrosslpp=TRUE) b2 <- nncross(XX, YY, k=1:2, what="which") if(any(b2$which.1 != b$which)) stop("inconsistent values of nncross.lpp()$which from k=1:2 and k=1") a2 <- nncross(XX, YY, k=1:2, what="dist") if(max(abs(a2$dist.1 - a$dist)) > eps) stop("Inconsistent values of nncross.lpp()$dist from k=1:2 and k=1") spatstat.options(Cnncrosslpp=TRUE) ii <- seq_len(npoints(XX)) w1 <- nnwhich(XX) w2 <- nncross(XX, XX, iX=ii, iY=ii, what="which") w3 <- nncross(XX, XX, iX=ii, iY=ii, what="which", method="interpreted") if(any(w1 != w2)) stop("nnwhich.lpp disagrees with nncross.lpp(iX, iY)") if(any(w2 != w3)) stop("Different results for nncross.lpp(iX, iY, 'which') using R and C") d1 <- nndist(XX) d2 <- nncross(XX, XX, iX=ii, iY=ii, what="dist") d3 <- nncross(XX, XX, iX=ii, iY=ii, what="dist", method="interpreted") if(max(abs(d1-d2)) > eps) stop("nndist.lpp disagrees with nncross.lpp(iX, iY)") if(max(abs(d2-d3)) > eps) stop("Different results for nncross.lpp(iX, iY, 'dist') using R and C") spatstat.options(Cnncrosslpp=FALSE) w4 <- nncross(XX, XX, iX=ii, iY=ii, what="which") d4 <- nncross(XX, XX, iX=ii, iY=ii, what="dist") if(any(w2 != w4)) stop("Different results for nncross.lpp(iX, iY, 'which') fast and slow C") if(max(abs(d2-d4)) > eps) stop("Different results for nncross.lpp(iX, iY, 'dist') fast and slow C") spatstat.options(Cnncrosslpp=TRUE) } reset.spatstat.options() if(FULLTEST) { ## test handling marginal cases xyd <- nncross(XX, YY[1]) A <- runiflpp(5, simplenet) B <- runiflpp(2, simplenet) aaa <- nncross(A,B,k=3:5) #' all undefined aaa <- nncross(A,B,k=1:4) #' some undefined spatstat.options(Cnncrosslpp=FALSE) aaa <- nncross(A,B,k=3:5) aaa <- nncross(A,B,k=1:4) bbb <- nncross(B,A, iX=1:2, iY=1:5) # another code block spatstat.options(Cnncrosslpp=TRUE) reset.spatstat.options() } if(FULLTEST) { ## as.linnet.psp (Suman's example) Lines <- as.data.frame(as.psp(simplenet)) newseg <- c(Lines[1,1:2], Lines[10,3:4]) Lines <- rbind(Lines, newseg) Y <- as.psp(Lines, window=Window(simplenet)) marks(Y) <- c(3, 4, 5, 5, 3, 4, 5, 5,5, 5,1) Z <- as.linnet(Y) # can crash if marks don't match segments ## Test linnet surgery code RL <- joinVertices(simplenet, matrix(c(2,3), ncol=2)) # redundant edge RZ <- joinVertices(Z, matrix(c(2,3), ncol=2), marks=6) # redundant edge JZ <- joinVertices(Z, matrix(c(2,7), ncol=2), marks=6) # new edge set.seed(42) X <- runiflpp(30, simplenet) V <- runiflpp(30, simplenet) XV <- insertVertices(X, V) validate.lpp.coords(XV, context="calculated by insertVertices") X0 <- insertVertices(X, x=numeric(0), y=numeric(0)) ## vertices on boundary of new window LL <- simplenet[boundingbox(vertices(simplenet))] ## Test [.lpp internal data B <- owin(c(0.1,0.7),c(0.19,0.6)) XB <- X[B] validate.lpp.coords(XB, context="returned by [.lpp") } ## Tests related to linearK, etc testcountends <- function(X, r=100, s=1) { if(s != 1) { X <- rescale(X, s) r <- r/s } L <- as.linnet(X) n1 <- countends(L, X[1], r) n2 <- npoints(lineardisc(L, X[1], r, plotit=FALSE)$endpoints) if(n1 != n2) stop(paste("Incorrect result from countends:", n1, "!=", n2, 
spatstat.utils::paren(paste("scale=", 1/s))), call.=FALSE) } if(ALWAYS) { ## original scale XU <- unmark(Xchic) testcountends(XU) ## finer scale testcountends(XU, s=1000) #' disconnected L <- thinNetwork(simplenet, retainedges = -c(3,8)) S <- as.psp(L) x <- midpoints.psp(S)[1] len <- lengths_psp(S)[1] A <- lineardisc(L, x, len, plotit=FALSE) #involves many segments of network B <- lineardisc(L, x, len/5, plotit=FALSE) # involves one segment of network op <- spatstat.options(Ccountends=FALSE) A <- lineardisc(L, x, len, plotit=FALSE) B <- lineardisc(L, x, len/5, plotit=FALSE) reset.spatstat.options() ## Test algorithms for boundingradius.linnet L <- as.linnet(chicago, sparse=TRUE) L$boundingradius <- NULL # artificially remove opa <- spatstat.options(Clinearradius=FALSE) bR <- as.linnet(L, sparse=FALSE)$boundingradius spatstat.options(Clinearradius=TRUE) bC <- as.linnet(L, sparse=FALSE)$boundingradius spatstat.options(opa) if(abs(bR-bC) > 0.001 * (bR+bC)/2) stop("Disagreement between R and C algorithms for boundingradius.linnet", call.=FALSE) } if(FULLTEST) { ## linnet things is.connected(as.linnet(dendrite)) zik <- rescale(Xchic, 39.37/12, "m") Simon <- simplenet unitname(Simon) <- list("metre", "metres", 0.5) b <- rescale(Simon) ds <- density(simplenet, 0.05) } if(ALWAYS) { ## invoke dist2dpath LS <- as.linnet(simplenet, sparse=TRUE) LF <- as.linnet(LS, sparse=FALSE) ## direct call dist2dpath d <- simplenet$dpath d[!simplenet$m] <- Inf diag(d) <- 0 dd <- dist2dpath(d, method="interpreted") ra <- range(dd - simplenet$dpath) if(max(abs(ra)) > sqrt(.Machine$double.eps)) stop("dist2dpath gives different answers in C and R code") } if(FULLTEST) { ## integral.linim with missing entries xcoord <- linfun(function(x,y,seg,tp) { x }, domain(Xchic)) xcoord <- as.linim(xcoord, dimyx=32) integral(xcoord) ## Math.linim range(xcoord) stopifnot(is.linim(sqrt(xcoord))) stopifnot(is.linim(-xcoord)) stopifnot(is.linim(xcoord + xcoord)) stopifnot(is.linim(xcoord/3)) ## options to plot.linim plot(xcoord, legend=FALSE) plot(xcoord, leg.side="top") plot(xcoord, style="width", leg.side="bottom") ## as.linim.linim xxcc <- as.linim(xcoord) xxcceps <- as.linim(xcoord, eps=15) xxccdel <- as.linim(xcoord, delta=30) df1 <- attr(xxcc, "df") df2 <- attr(xxccdel, "df") df3 <- resampleNetworkDataFrame(df1, df2) ## linim with complex values Zc <- as.im(function(x,y){(x-y) + x * 1i}, Frame(simplenet)) Fc <- linim(simplenet, Zc) print(Fc) print(summary(Fc)) ## linim with df provided Z <- as.im(function(x,y) {x-y}, Frame(simplenet)) X <- linim(simplenet, Z) df <- attr(X, "df") XX <- linim(simplenet, Z, df=df) dfwithout <- df[, colnames(df) != "values"] XXX <- linim(simplenet, Z, df=dfwithout) plot(XXX, zlim=c(-1,1)) plot(XXX, legend=FALSE) plot(XXX, leg.side="bottom") ## lpp with multiple columns of marks M <- Xchic marks(M) <- cbind(type=marks(M), data.frame(distnearest=nndist(M))) plot(M, main="") summary(M) MM <- cut(M) #' other cases CC <- cut(Xchic) nd <- nndist(Xspid) SX <- cut(Xspid %mark% nd, breaks=3) SX <- cut(Xspid, nd, breaks=c(0,100,200,Inf), include.lowest=TRUE) ## linequad Y <- Xsimple %mark% factor(rep(c("A", "B"), 3)) aX <- linequad(Xsimple) aY <- linequad(Y) aXR <- linequad(Xsimple, random=TRUE) aYR <- linequad(Y, random=TRUE) P <- as.ppp(Xsimple) S <- as.psp(domain(Xsimple)) d <- linequad(P, S) oop <- spatstat.options(Clinequad=FALSE) bX <- linequad(Xsimple) spatstat.options(oop) ## other internal utilities df <- pointsAlongNetwork(simplenet, 0.05) X <- as.ppp(df[,c("x", "y")], W=Frame(simplenet)) A <- 
local2lpp(simplenet, seg=df$seg, tp=df$tp, X=X, df.only=FALSE) ## mark-mark scatterplot uses pairdist X <- runiflpp(20, simplenet) %mark% runif(20) markmarkscatter(X, 0.2) markmarkscatter(X[FALSE], 0.1) ## tree branches ## make a simple tree m <- simplenet$m m[8,10] <- m[10,8] <- FALSE L <- linnet(vertices(simplenet), m) tb <- treebranchlabels(L, 1) X <- runiflpp(30, L) ## delete branch B XminusB <- deletebranch(X, "b", tb) ## extract branch B XB <- extractbranch(X, "b", tb) ## cases of lintess() A <- lintess(simplenet) # argument df missing S <- as.psp(simplenet) ns <- nsegments(S) df <- data.frame(seg=1:ns, t0=0, t1=1, tile=letters[1:ns]) M <- data.frame(len=lengths_psp(S), ang=angles.psp(S)) V <- lintess(simplenet, df, marks=M) ## methods for class lintess U <- unmark(V) U <- unstack(V) print(summary(V)) W <- Window(V) plot(V, style="image") plot(V, style="width") ## linear tessellations infrastructure nX <- 100 nY <- 20 X <- runiflpp(nX, simplenet) Y <- runiflpp(nY, simplenet) tes <- divide.linnet(Y) cX <- coords(X) iI <- lineartileindex(cX$seg, cX$tp, tes, method="interpreted") iC <- lineartileindex(cX$seg, cX$tp, tes, method="C") iE <- lineartileindex(cX$seg, cX$tp, tes, method="encode") if(!identical(iI,iC)) stop("conflicting results from lineartileindex (interpreted vs C)") if(!identical(iI,iE)) stop("conflicting results from lineartileindex (interpreted vs encoded)") iA <- as.linfun(tes)(X) if(!identical(iI, iA)) stop("disagreement between as.linfun.lintess and lineartileindex") ## intersection of lintess X <- divide.linnet(runiflpp(4, simplenet)) Y <- divide.linnet(runiflpp(3, simplenet)) nX <- summary(X)$nt nY <- summary(Y)$nt marks(X) <- factor(letters[1L + (seq_len(nX) %% 2)]) marks(Y) <- runif(nY) Zmm <- intersect.lintess(X,Y) Zum <- intersect.lintess(unmark(X),Y) Zmu <- intersect.lintess(X,unmark(Y)) } if(FULLTEST) { #' handling by 'solist', 'unstack', 'plot.solist' etc L <- simplenet X <- runiflpp(5, L) %mark% cbind(a=1:5, b=letters[1:5]) ns <- nsegments(L) df <- data.frame(seg=1:ns, t0=0, t1=1, tile=letters[1:ns]) S <- lintess(L, df) f <- as.linfun(S) g <- as.linfun(S, values=seq_len(nsegments(L))) V <- as.linim(f) Z <- as.linim(g) shebang <- solist(L=L, X=X, S=S, f=f, g=g, V=V, Z=Z) plot(shebang) plot(shebang, valuesAreColours=FALSE) kapow <- unstack(shebang) plot(kapow) } #' kernel density estimation X <- Xsimple[1:5] w <- runif(npoints(X)) if(ALWAYS) { #' density.lpp -> densityHeat, includes platform-dependent code D <- density(X, 0.05) D <- density(X, 0.05, weights=w) } if(FULLTEST) { #' code blocks D <- density(X[FALSE], 0.05) D <- density(X, Inf) D <- density(X, 0.05, finespacing=TRUE) # } D <- density(X, 0.05, eps=0.008) # } code blocks in resolve.heat.steps D <- density(X, 0.05, dimyx=256) # } } if(ALWAYS) { DX <- density(X, 0.05, at="points", fastmethod="a", debug=TRUE) DX <- density(X, 0.05, at="points", fast=FALSE, debug=TRUE) ## densityfun.lpp, code blocks ff <- densityfun(X, 0.05, nsigma=Inf) } if(FULLTEST) { #' disconnected network L <- thinNetwork(simplenet, retainedges=-c(3,5)) Y <- runiflpp(5, L) D <- density(Y, 0.05) D <- density(Y, 0.05, weights=w) D <- density(Y, Inf) g <- flatdensityfunlpp(Y, disconnect=FALSE) a <- flatdensityatpointslpp(Y, disconnect=FALSE) #' density.lpp -> densityEqualSplit D <- density(X, 0.05, kernel="e", weights=w) D <- density(X, Inf, kernel="e") D <- density(Y, 0.05, kernel="e", weights=w) D <- density(Y, Inf, kernel="e") } if(ALWAYS) { #' density.lpp -> densityQuick.lpp D <- density(X, 0.05, distance="e", weights=runif(5)) } 
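  ## Editorial sketch (not part of the original test suite): one extra minimal
  ## check of the quick Euclidean-kernel estimator exercised just above,
  ## assuming 'X' (the 5-point pattern on simplenet defined earlier) and
  ## FULLTEST are still in scope.
  if(FULLTEST) {
    Dquick <- density(X, 0.05, distance="euclidean")
    stopifnot(is.linim(Dquick))
  }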
if(FULLTEST) { D <- density(X, Inf, distance="e") D <- density(Y, 0.05, distance="e", weights=runif(5)) D <- density(Y, Inf, distance="e") ## example from Andrea Gilardi ## with duplicated points, and points at a vertex A <- runiflpp(5, simplenet) V <- lpp(vertices(simplenet)[2:4], simplenet) B <- superimpose(A, A[3], V) D <- density(B, sigma=0.2, distance="e", at="points") if(length(as.numeric(D)) != npoints(B)) stop("density.lpp(at='points') returns wrong number of values") G <- superimpose(U=runiflpp(4, simplenet), B=B) b <- bw.relrisklpp(G, distance="e", nh=2) } if(FULLTEST) { #' densityVoronoi.lpp and related code densityVoronoi(X, f=0) densityVoronoi(X, f=1e-8) densityVoronoi(X, f=1) densityVoronoi(X[FALSE], f=0.5) XX <- X[rep(1:npoints(X), 4)] densityVoronoi(XX, f=0.99999, nrep=5) densityVoronoi(Y, f=0) densityVoronoi(Y, f=1e-8) densityVoronoi(Y, f=1) densityVoronoi(Y[FALSE], f=0.5) #' bandwidth selection bw.voronoi(X, nrep=4, prob=c(0.2, 0.4, 0.6)) bw.lppl(X) sug <- seq(0.1, 0.3, by=0.05) bw.lppl(X, sigma=sug) bw.lppl(X, sigma=sug, shortcut=FALSE) bw.lppl(X, sigma=sug, distance="path", shortcut=FALSE) } if(ALWAYS) { #' inhomogeneous K and g SD <- split(Xdend) DT <- density(SD[["thin"]], 100, distance="euclidean") DS <- density(SD[["stubby"]], 100, distance="euclidean") Kii <- linearKcross.inhom(dendrite, "thin", "thin", DT, DT) Kij <- linearKcross.inhom(dendrite, "thin", "stubby", DT, DS, correction="none", ratio=TRUE) gii <- linearpcfcross.inhom(dendrite, "thin", "thin", DT, DT) gij <- linearpcfcross.inhom(dendrite, "thin", "stubby", DT, DS, correction="none", ratio=TRUE) } if(FULLTEST) { Kx <- linearKcross.inhom(dendrite[1], "thin", "stubby", DT, DS) gx <- linearpcfcross.inhom(dendrite[1], "thin", "stubby", DT, DS) } if(FULLTEST) { ## infrastructure for density.lpp L <- domain(Xchic) A <- resolve.heat.steps(100, L=L, dx=1) B <- resolve.heat.steps(100, L=L, dt=0.2) C <- resolve.heat.steps(100, L=L, niter=1e5) C <- resolve.heat.steps(100, L=L, niter=1e5, nsave=3) C <- resolve.heat.steps(100, L=L, niter=1e5, nsave=Inf) D <- resolve.heat.steps(100, L=L, dx=1, dt=0.2) E <- resolve.heat.steps(500, L=L, dt=0.5, iterMax=2e5) A <- resolve.heat.steps(1, L=simplenet, dt=2, dx=0.05) B <- resolve.heat.steps(1, L=simplenet) C <- resolve.heat.steps(1, L=simplenet, finespacing=FALSE) D <- resolve.heat.steps(1, seglengths=rep(0.7, 5), maxdegree=4, AMbound=7) ## vertices on boundary L <- simplenet L <- L[boundingbox(vertices(L))] X <- as.lpp(0.41259, 0.6024, L=L) D <- density(X, 0.1) } if(FULLTEST) { #' multitype #' density.splitppx Z <- split(Xchic)[1:3] D <- density(Z, 7) #' relrisk.lpp for 3 types U <- do.call(superimpose, Z) A <- relrisk(U, 20, control=1, relative=TRUE) A <- relrisk(U, 20, control=1, relative=TRUE, at="points") #' relrisk.lpp for 2 types V <- do.call(superimpose, Z[1:2]) A <- relrisk(V, 20, control=2, casecontrol=FALSE) # issues warning B <- relrisk(V, 20, control="assault", case=2, relative=TRUE) D <- relrisk(V, 20, control=1, case="burglary", relative=TRUE, at="points") #' bw.relrisklpp b1 <- bw.relrisklpp(V, hmax=3, fudge=0.01, method="leastsquares", reference="sigma") b2 <- bw.relrisklpp(V, hmax=3, fudge=0.01, method="KelsallDiggle", reference="uniform") } if(FULLTEST) { #' pairs.linim Z <- density(Xsimple, 0.5, distance="euclidean") pairs(solist(Z)) pairs(solist(A=Z)) U <- density(as.ppp(Xsimple), 0.5) pairs(solist(U, Z)) pairs(solist(Z, U)) } if(FULLTEST) { #' complex-valued functions and images f <- function(x,y,seg,tp) { x + y * 1i } g <- linfun(f, simplenet) h <- 
as.linim(g) plot(Re(h)) plot(h) plot(g) integral(h) integral(g) } ## 'lixellate' - involves C code if(ALWAYS) { ## Cases where no subdivision occurs P <- runiflpp(4, simplenet) A <- lixellate(P, nsplit=1) B <- lixellate(P, eps=2) ## bug in 'lixellate' (Jakob Gulddahl Rasmussen) X <- ppp(c(0,1), c(0,0), owin()) L <- linnet(X, edges = matrix(1:2, ncol=2)) Y <- lpp(X, L) ## The left end point is OK lixellate(Y[1], nsplit=30) d <- density(Y[1], .1) ## The right end point gave an error lixellate(Y[2], nsplit=30) d <- density(Y[2], .1) } if(FULLTEST) { ## make some bad data and repair it L <- simplenet ## reverse edges a <- L$from[c(FALSE,TRUE)] L$from[c(FALSE,TRUE)] <- L$to[c(FALSE,TRUE)] L$to[c(FALSE,TRUE)] <- a ## duplicate edges ns <- nsegments(L) ii <- c(seq_len(ns), 2) L$from <- L$from[ii] L$to <- L$to[ii] L$lines <- L$lines[ii] ## Y <- repairNetwork(L) ## add points X <- runiflpp(4, L) Z <- repairNetwork(X) } if(FULLTEST) { #' random generation bugs and code blocks A <- runiflpp(5, simplenet, nsim=2) D <- density(A[[1]], 0.3) B <- rpoislpp(D, nsim=2) stopifnot(is.multitype(rlpp(c(10,5), list(a=D,b=D)))) stopifnot(is.multitype(rlpp(5, list(a=D,b=D)))) stopifnot(is.multitype(rlpp(c(10,5), D))) } ## rhohat.lppm if(FULLTEST) { fut <- lppm(Xspid ~ 1) rx <- rhohat(fut, "x") Z <- linfun(function(x,y,seg,tp) { x }, domain(Xspid)) rZ <- rhohat(fut, Z) U <- predict(rx) U <- predict(rZ) Y <- simulate(rx) Y <- simulate(rZ) futm <- lppm(Xchic ~ x + marks) ry <- rhohat(futm, "y") U <- predict(ry) Y <- simulate(ry) } if(ALWAYS) { #' new code for pairdist.lpp X <- runiflpp(10, simplenet) dC <- pairdist(X, method="C") dI <- pairdist(X, method="interpreted") if(max(abs(dC - dI)) > 0.01) stop("pairdist.lpp: disagreement between C and R") XS <- as.lpp(X, sparse=TRUE) dS <- pairdist(XS) if(max(abs(dC - dS)) > 0.01) stop("pairdist.lpp: disagreement between sparse and non-sparse C") dU <- pairdist(XS, method="testsymm") # secret option if(max(abs(dS - dU)) > 0.01) stop("pairdist.lpp: disagreement between symmetric and asymmetric") #' new code for crossdist.lpp #' non-sparse v <- split(Xchic) X <- v$cartheft Y <- v$burglary dC <- crossdist(X, Y, method="C") dI <- crossdist(X, Y, method="interpreted") if(max(abs(dC - dI)) > 0.01) stop("crossdist.lpp (non-sparse): disagreement between C and R") #' convert to sparse Chuck <- as.lpp(Xchic, sparse=TRUE) V <- split(Chuck) X <- V$cartheft Y <- V$burglary dS <- crossdist(X, Y) if(max(abs(dS - dC)) > 0.01) stop("crossdist.lpp: disagreement between sparse and non-sparse") #' disconnected L <- thinNetwork(simplenet, retainedges = -c(3,8)) X <- runiflpp(20, L) Y <- runiflpp(15, L) d <- crossdist(X,Y) ## example where the path is very long (covers a previous bug) X <- dendrite[c(349,563)] a <- pairdist(X, method="testsymm") if(max(abs(a-t(a))) > 0.01) stop("pairdist.lpp: asymmetry for long distances") b <- pairdist(as.lpp(X, sparse=FALSE)) if(max(abs(a-b)) > 0.01) stop("pairdist.lpp: disagreement sparse vs non-sparse for long distance") } }) reset.spatstat.options() #' #' lppmodels.R #' #' Tests of lppm and class support #' #' $Revision: 1.2 $ $Date: 2020/06/12 00:24:09 $ #' local({ if(ALWAYS) { fit0 <- lppm(spiders) fit1 <- lppm(spiders ~ x) summary(fit0) summary(fit1) pseudoR2(fit0) pseudoR2(fit1) } if(FULLTEST) { fit2 <- lppm(chicago ~ x+y) summary(fit2) pseudoR2(fit2) } if(ALWAYS) { X <- runiflpp(10, simplenet) Z <- distfun(runiflpp(10, simplenet)) fit3 <- lppm(X ~ Z) summary(fit3) pseudoR2(fit3) } Window(fit1) if(ALWAYS) a <- model.images(fit0) if(FULLTEST) { a <- 
model.images(fit1) a <- model.images(fit2) a <- model.images(fit3) } if(ALWAYS) b <- model.matrix(fit0) if(FULLTEST) { b <- model.matrix(fit1) b <- model.matrix(fit2) b <- model.matrix(fit3) } if(ALWAYS) is.multitype(fit0) if(FULLTEST) { is.multitype(fit1) is.multitype(fit2) is.multitype(fit3) } if(ALWAYS) fit0e <- emend(fit0) if(FULLTEST) { fit1e <- emend(fit1) fit2e <- emend(fit2) fit3e <- emend(fit3) } #' fundamental utilities: #' evalCovar ycoord <- function(x,y) { y } if(ALWAYS) YS <- as.linim(ycoord, L=domain(spiders)) if(FULLTEST) YC <- as.linim(ycoord, L=domain(chicago)) if(ALWAYS) aT <- evalCovar(fit1, YS, interpolate=TRUE) if(FULLTEST) { aF <- evalCovar(fit1, YS, interpolate=FALSE) dT <- evalCovar(fit1, ycoord, interpolate=TRUE) dF <- evalCovar(fit1, ycoord, interpolate=FALSE) bT <- evalCovar(fit2, YC, interpolate=TRUE) bF <- evalCovar(fit2, YC, interpolate=FALSE) cT <- evalCovar(fit2, ycoord, interpolate=TRUE) cF <- evalCovar(fit2, ycoord, interpolate=FALSE) } }) spatstat.linnet/tests/testsMtoZ.R0000644000176200001440000000322114141460471016646 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.linnet #' Obtain environment variable controlling tests. #' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.linnet) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) ## ## tests/segments.R ## Tests of psp class and related code ## [SEE ALSO: tests/xysegment.R] ## ## $Revision: 1.32 $ $Date: 2020/12/04 05:26:31 $ if(FULLTEST) { local({ ## more tests of lppm code fit <- lppm(unmark(chicago) ~ polynom(x,y,2)) Z <- predict(fit) }) } # # tests/undoc.R # # $Revision: 1.16 $ $Date: 2020/11/02 07:06:49 $ # # Test undocumented hacks, experimental code, etc if(FULLTEST) { local({ ## linim helper functions df <- pointsAlongNetwork(simplenet, 0.2) }) } ## ## tests/updateppm.R ## ## Check validity of update.ppm ## ## $Revision: 1.7 $ $Date: 2020/11/02 07:07:42 $ local({ if(ALWAYS) { ## test update.lppm X <- runiflpp(20, simplenet) fit0 <- lppm(X ~ 1) fit1 <- update(fit0, ~ x) anova(fit0, fit1, test="LR") cat("update.lppm(fit, ~trend) is OK\n") fit2 <- update(fit0, . ~ x) anova(fit0, fit2, test="LR") cat("update.lppm(fit, . ~ trend) is OK\n") } }) ## ## tests/xysegment.R ## [SEE ALSO tests/segments.R] ## ## Test weird problems and boundary cases for line segment code ## ## $Version$ $Date: 2020/11/02 07:11:48 $ ## if(FULLTEST) local({ S <- as.psp(simplenet) }) spatstat.linnet/tests/testsAtoK.R0000644000176200001440000001214014141460471016613 0ustar liggesusers#' #' Header for all (concatenated) test files #' #' Require spatstat.linnet #' Obtain environment variable controlling tests. 
#' #' $Revision: 1.5 $ $Date: 2020/04/30 05:31:37 $ require(spatstat.linnet) FULLTEST <- (nchar(Sys.getenv("SPATSTAT_TEST", unset="")) > 0) ALWAYS <- TRUE cat(paste("--------- Executing", if(FULLTEST) "** ALL **" else "**RESTRICTED** subset of", "test code -----------\n")) #' tests/aucroc.R #' #' AUC and ROC code #' #' $Revision: 1.6 $ $Date: 2020/11/02 06:26:45 $ local({ if(FULLTEST) { A <- roc(spiders, "x") B <- auc(spiders, "y") fut <- lppm(spiders ~ I(y-x)) f <- roc(fut) g <- auc(fut) } }) ## tests/cdf.test.R local({ NSIM <- 9 op <- spatstat.options(ndummy.min=16, npixel=32) if(ALWAYS) { ## (3) linear networks set.seed(42) X <- runiflpp(20, simplenet) cdf.test(X, "x") if(FULLTEST) { cdf.test(X, "x", "cvm") cdf.test(X %mark% runif(20), "x") } fit <- lppm(X ~1) cdf.test(fit, "y", "cvm", nsim=NSIM) if(FULLTEST) { cdf.test(fit, "y", nsim=NSIM) cdf.test(fit, "y", "ad", nsim=NSIM) } if(FULLTEST) { ## marked cdf.test(chicago, "y") cdf.test(subset(chicago, marks != "assault"), "y") } } reset.spatstat.options() }) #' #' tests/cluck.R #' #' Tests of "click*" functions #' using queueing feature of spatstatLocator #' #' $Revision: 1.7 $ $Date: 2020/11/02 06:53:30 $ local({ Y <- coords(runiflpp(6, simplenet)) if(FULLTEST) { #' clicklpp spatstat.utils::queueSpatstatLocator(Y) XL <- clicklpp(simplenet) } if(ALWAYS) { spatstat.utils::queueSpatstatLocator(Y) XM <- clicklpp(simplenet, n=3, types=c("a", "b")) } if(ALWAYS) { #' lineardisc plot(simplenet) spatstat.utils::queueSpatstatLocator(as.ppp(runiflpp(1, simplenet))) V <- lineardisc(simplenet, r=0.3) } }) #' #' tests/disconnected.R #' #' disconnected linear networks #' #' $Revision: 1.4 $ $Date: 2020/04/28 12:58:26 $ local({ #' disconnected network m <- simplenet$m m[4,5] <- m[5,4] <- m[6,10] <- m[10,6] <- m[4,6] <- m[6,4] <- FALSE L <- linnet(vertices(simplenet), m) if(FULLTEST) { L summary(L) is.connected(L) Z <- connected(L, what="components") } #' point pattern with no points in one connected component set.seed(42) X <- rpoislpp(lambda=function(x,y) { 10 * (x < 0.5)}, L) B <- lineardirichlet(X) if(FULLTEST) { plot(B) summary(B) } if(ALWAYS) { D <- pairdist(X) A <- nndist(X) } if(FULLTEST) { H <- nnwhich(X) Y <- rpoislpp(lambda=function(x,y) { 10 * (x < 0.5)}, L) G <- nncross(X, Y) J <- crossdist(X, Y) plot(distfun(X)) # includes evaluation of nncross(what="dist") } #' K functions in disconnected network if(ALWAYS) { K <- linearK(X) lamX <- intensity(X) nX <- npoints(X) KI <- linearKinhom(X, lambda=rep(lamX, nX)) P <- linearpcf(X) PJ <- linearpcfinhom(X, lambda=rep(lamX, nX)) } Y <- X %mark% factor(rep(1:2, nX)[1:nX]) if(FULLTEST) { Y1 <- split(Y)[[1]] Y2 <- split(Y)[[2]] KY <- linearKcross(Y) PY <- linearpcfcross(Y) KYI <- linearKcross.inhom(Y, lambdaI=rep(intensity(Y1), npoints(Y1)), lambdaJ=rep(intensity(Y2), npoints(Y2))) PYI <- linearpcfcross.inhom(Y, lambdaI=rep(intensity(Y1), npoints(Y1)), lambdaJ=rep(intensity(Y2), npoints(Y2))) } #' internal utilities if(FULLTEST) { K <- ApplyConnected(X, linearK, rule=function(...) 
list()) } }) # # tests/envelopes.R # # Test validity of envelope data # # $Revision: 1.24 $ $Date: 2020/11/02 06:53:20 $ # if(FULLTEST) { local({ X <- runiflpp(10, simplenet) Xr <- X %mark% runif(10) Xc <- X %mark% factor(letters[c(1:4,3,2,4:1)]) X2 <- X %mark% data.frame(height=runif(10), width=runif(10)) E <- envelope(X, linearK, nsim=9) Er <- envelope(Xr, linearK, nsim=9) Ec <- envelope(Xc, linearK, nsim=9) E2 <- envelope(X2, linearK, nsim=9) Erf <- envelope(Xr, linearK, nsim=9, fix.n=TRUE) E2f <- envelope(X2, linearK, nsim=9, fix.n=TRUE) Ecf <- envelope(Xc, linearK, nsim=9, fix.n=TRUE) Ecm <- envelope(Xc, linearKcross, nsim=9, fix.n=TRUE, fix.marks=TRUE) fut <- lppm(Xc ~ marks) EEf <- envelope(fut, linearK, fix.n=TRUE) EEm <- envelope(fut, linearKcross, fix.n=TRUE, fix.marks=TRUE) }) } # # tests/func.R # # $Revision: 1.8 $ $Date: 2020/12/03 03:28:44 $ # # Tests of 'funxy' infrastructure etc if(FULLTEST) { local({ ## Check the peculiar function-building code in funxy W <- square(1) f1a <- function(x, y) sqrt(x^2 + y^2) F1a <- funxy(f1a, W) Y <- runiflpp(5, simplenet) b <- F1a(Y) }) } #' tests/hypotests.R #' Hypothesis tests #' #' $Revision: 1.9 $ $Date: 2020/11/02 06:39:23 $ if(FULLTEST) { local({ berman.test(spiders, "x") berman.test(lppm(spiders ~ x), "y") }) } # # tests/imageops.R # # $Revision: 1.32 $ $Date: 2021/04/14 08:57:21 $ # if(FULLTEST) { local({ d <- distmap(cells, dimyx=32) ## linear networks ee <- d[simplenet, drop=FALSE] eev <- d[simplenet] }) } spatstat.linnet/src/0000755000176200001440000000000014141460471014176 5ustar liggesusersspatstat.linnet/src/linvknndist.c0000755000176200001440000001373514141460471016721 0ustar liggesusers#include #include "yesno.h" /* linvknndist.c k-th nearest neighbour function at vertices (distance from each vertex to the nearest, second nearest, ... k-th nearest target data point) Needs only the sparse representation of the network $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 ! Data points must be ordered by segment index ! 
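  Editor's note on the output layout (summarising the code below):
  'dist' and 'which' are laid out with Kmax consecutive entries per vertex,
  so DIST(v, k) = dist[k + v * Kmax] holds the distance from vertex v to its
  (k+1)-th nearest target data point, and WHICH(v, k) holds that point's index
  (-1 while still undefined). Entries are seeded from the data points lying on
  each segment and then repeatedly relaxed along every segment, in both
  directions, until no entry improves by more than 'tol'.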
*/ #undef HUH #define DIST(VERTEX, ORDER) dist[(ORDER) + (VERTEX) * Kmax] #define WHICH(VERTEX, ORDER) which[(ORDER) + (VERTEX) * Kmax] #define UPDATE(VERTEX, D, J, EPS) \ UpdateKnnList(D, J, \ dist + (VERTEX) * Kmax, \ which + (VERTEX) * Kmax, \ Kmax, \ EPS) void linvknndist(kmax, /* number of neighbours required */ nq, sq, tq, /* target data points (ordered by sq) */ nv, /* number of network vertices */ ns, from, to, /* segments (pairs of vertices) */ seglen, /* segment lengths */ huge, /* value taken as infinity */ tol, /* tolerance for updating distances */ /* OUTPUT */ dist, /* distance from each vertex to the nearest, ..., kth nearest data points */ which /* identifies which data points */ ) int *kmax; int *nq, *nv, *ns; /* number of points, vertices, segments */ int *sq, *from, *to; /* integer vectors (mappings) */ double *tq; /* fractional location coordinates */ double *huge, *tol; double *seglen; double *dist; int *which; { int Nq, Nv, Ns, Kmax, Nout, i, j, k, m; int segQj, ivleft, ivright, changed; double hugevalue, eps, slen, d, tqj; char converged; int UpdateKnnList(); Kmax = *kmax; Nq = *nq; Nv = *nv; Ns = *ns; hugevalue = *huge; eps = *tol; /* number of values in 'dist' and in 'which' */ Nout = Nv * Kmax; #ifdef HUH Rprintf("Initialise dist\n"); #endif /* initialise to huge value */ for(i = 0; i < Nout; i++) { dist[i] = hugevalue; which[i] = -1; } #ifdef HUH Rprintf("Run through target points\n"); #endif /* assign value to endpoints of segments containing target points */ for(j = 0; j < Nq; j++) { segQj = sq[j]; tqj = tq[j]; slen = seglen[segQj]; ivleft = from[segQj]; d = slen * tqj; UPDATE(ivleft, d, j, (double) 0.0); ivright = to[segQj]; d = slen * (1.0 - tqj); UPDATE(ivright, d, j, (double) 0.0); } #ifdef HUH Rprintf("Initialised values at vertices:\n"); Rprintf("\ti\twhich\tdist\n"); for(i = 0; i < Nv; i++) { Rprintf("\t%d", i); for(k = 0; k < Kmax; k++) Rprintf(" %d ", WHICH(i, k)); for(k = 0; k < Kmax; k++) Rprintf(" %lf ", DIST(i, k)); Rprintf("\n"); } #endif /* recursively update */ #ifdef HUH Rprintf("Recursive update\n"); #endif converged = NO; while(!converged) { converged = YES; #ifdef HUH Rprintf("........... starting new pass ...................... 
\n"); Rprintf("Current state:\n"); Rprintf("\ti\twhich\tdist\n"); for(i = 0; i < Nv; i++) { Rprintf("\t%d", i); for(k = 0; k < Kmax; k++) Rprintf(" %d ", WHICH(i, k)); for(k = 0; k < Kmax; k++) Rprintf(" %lf ", DIST(i, k)); Rprintf("\n"); } #endif for(m = 0; m < Ns; m++) { ivleft = from[m]; ivright = to[m]; slen = seglen[m]; #ifdef HUH Rprintf("updating right=%d from left=%d\n", ivright, ivleft); #endif for(k = 0; k < Kmax; k++) { changed = UPDATE(ivright, DIST(ivleft, k)+slen, WHICH(ivleft, k), eps); converged = converged && !changed; } #ifdef HUH Rprintf("updating left=%d from right=%d\n", ivleft, ivright); #endif for(k = 0; k < Kmax; k++) { changed = UPDATE(ivleft, DIST(ivright, k)+slen, WHICH(ivright, k), eps); converged = converged && !changed; } } } #ifdef HUH Rprintf("Done\nVertex values:\n"); Rprintf("\ti\twhich\tdist\n"); for(i = 0; i < Nv; i++) { Rprintf("\t%d", i); for(k = 0; k < Kmax; k++) Rprintf(" %d ", WHICH(i, k)); for(k = 0; k < Kmax; k++) Rprintf(" %lf ", DIST(i, k)); Rprintf("\n"); } #endif } /* update a list of nearest, second nearest, ..., k-th nearest neighbours */ int UpdateKnnList(d, j, dist, which, Kmax, eps) double d; /* candidate distance */ int j; /* corresponding candidate target point */ int Kmax; double *dist; /* pointer to start of vector of length Kmax */ int *which; /* pointer to start of vector of length Kmax */ double eps; /* numerical tolerance, to prevent infinite loops */ { char matched, unsorted, changed; int k, Klast, itmp; double dtmp, dPlusEps; Klast = Kmax - 1; dPlusEps = d + eps; if(dPlusEps > dist[Klast]) return(NO); changed = NO; /* Check whether this data point is already listed as a neighbour */ matched = NO; for(k = 0; k < Kmax; k++) { if(which[k] == j) { matched = YES; #ifdef HUH Rprintf("\tMatch: which[%d] = %d\n", k, j); #endif if(dPlusEps <= dist[k]) { changed = YES; #ifdef HUH Rprintf("\t\tUpdated distance from %lf to %lf\n", dist[k], d); #endif dist[k] = d; } break; } } if(!matched) { #ifdef HUH Rprintf("\tNo match with current list\n"); Rprintf("\t\tUpdated distance from %lf to %lf\n", dist[Klast], d); #endif /* replace furthest point */ changed = YES; dist[Klast] = d; which[Klast] = j; } /* Bubble sort entries */ if(changed) { #ifdef HUH Rprintf("Bubble sort.\nCurrent state:\n\tk\twhich\tdist\n"); for(k = 0; k <= Klast; k++) Rprintf("\t%d\t%d\t%lf\n", k, which[k], dist[k]); #endif do { unsorted = NO; for(k = 0; k < Klast; k++) { if(dist[k] > dist[k+1]) { unsorted = YES; dtmp = dist[k]; dist[k] = dist[k+1]; dist[k+1] = dtmp; itmp = which[k]; which[k] = which[k+1]; which[k+1] = itmp; } } } while(unsorted); } #ifdef HUH Rprintf("Return state:\n\tk\twhich\tdist\n"); for(k = 0; k <= Klast; k++) Rprintf("\t%d\t%d\t%lf\n", k, which[k], dist[k]); #endif return( (int) changed); } spatstat.linnet/src/heatapprox.c0000644000176200001440000000272214141460471016520 0ustar liggesusers/* Approximation to heat kernel Copyright (c) Greg McSwiggan and Adrian Baddeley 2017-2020 $Revision: 1.1 $ $Date: 2020/04/05 03:38:35 $ */ #include #include void heatApprox(n, a, x, y, s, degl, degr, m, z) int *n; /* number of calculations (length of each vector) */ double *a; /* rod length */ double *x; /* source position */ double *y; /* query position */ double *s; /* bandwidth */ int *degl; /* vertex degree of left endpoint */ int *degr; /* vertex degree of right endpoint */ int *m; /* number of terms in sum */ double *z; /* result */ { register int i, k, N, M, dL, dR; register double Z, A, twoA, Bk, X, Y, sigma, fL, fR, fLfR, cc; N = *n; M = *m; for(i = 0; i < 
N; i++) { sigma = s[i]; A = a[i]; if(A <= 0.0 || sigma <= 0.0) { /* trap bad data */ z[i] = 0.0; } else { /* do calculation */ X = x[i]; Y = y[i]; dL = degl[i]; dR = degr[i]; fL = (2.0/dL - 1.0); fR = (2.0/dR - 1.0); fLfR = fL * fR; twoA = 2.0 * A; Z = dnorm(Y, X, sigma, (int) 0); cc = 1.0; for(k = 1; k <= M; k++) { Bk = k * twoA; Z += cc * (fR * dnorm( Bk - Y, X, sigma, (int) 0) + fLfR * dnorm( Bk + Y, X, sigma, (int) 0) + fL * dnorm(-Bk + Y, X, sigma, (int) 0) + fLfR * dnorm(-Bk - Y, X, sigma, (int) 0)); cc *= fLfR; } z[i] = Z; } } } spatstat.linnet/src/linknnd.h0000755000176200001440000001030714141460471016010 0ustar liggesusers/* linknnd.h k-th nearest neighbours in a linear network Using sparse representation of network ! Data points must be ordered by segment index ! This code is #included several times in linknnd.c Macros required: FNAME Function name CROSS #defined for X-to-Y, undefined for X-to-X HUH debugging flag $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define MAT(MATRIXNAME, INDEX, ORDER) MATRIXNAME[(ORDER) + (INDEX) * Kmax] #define NNDIST(INDEX, ORDER) MAT(nndist, (INDEX), (ORDER)) #define NNWHICH(INDEX, ORDER) MAT(nnwhich, (INDEX), (ORDER)) #define VDIST(INDEX, ORDER) MAT(dminvert, (INDEX), (ORDER)) #define VWHICH(INDEX, ORDER) MAT(whichvert, (INDEX), (ORDER)) #define UPDATENN(INDEX, D, J) \ UpdateKnnList(D, J, \ nndist + (INDEX) * Kmax, \ nnwhich + (INDEX) * Kmax, \ Kmax, \ (double) 0.0) /* ................. */ void FNAME(kmax, /* number of neighbours required */ np, sp, tp, /* source data points (ordered by sp) */ #ifdef CROSS nq, sq, tq, /* target data points (ordered by sq) */ #endif nv, /* number of network vertices */ ns, from, to, /* segments (pairs of vertices) */ seglen, /* segment lengths */ huge, /* value taken as infinity */ tol, /* tolerance for updating distances */ /* OUTPUT */ nndist, /* distance from each source point to the nearest, ..., kth nearest target points */ nnwhich /* identifies which target points */ ) int *kmax; int *np, *nv, *ns; /* number of points, vertices, segments */ int *sp, *from, *to; /* integer vectors (mappings) */ double *tp; /* fractional location coordinates */ #ifdef CROSS int *nq, *sq; double *tq; #endif double *huge, *tol; double *seglen; double *nndist; int *nnwhich; { int Np, Nv, Kmax, Nout, i, j, ivleft, ivright, jfirst, jlast, k, m; double d, hugevalue, slen, tpi, deltad; double *dminvert; /* min dist from each vertex */ int *whichvert; /* which min from each vertex */ int linvknndist(), UpdateKnnList(); #ifdef CROSS int Nq; #else #define Nq Np #define nq np #define sq sp #define tq tp #endif Kmax = *kmax; Np = *np; Nv = *nv; hugevalue = *huge; #ifdef CROSS Nq = *nq; #endif /* First compute min distances to target set from each vertex */ #ifdef HUH Rprintf("Computing distances from each vertex\n"); #endif dminvert = (double *) R_alloc(Nv * Kmax, sizeof(double)); whichvert = (int *) R_alloc(Nv * Kmax, sizeof(int)); linvknndist(kmax, nq, sq, tq, nv, ns, from, to, seglen, huge, tol, dminvert, whichvert); #ifdef HUH Rprintf("Initialise answer\n"); #endif /* initialise nn distances from source points */ Nout = Np * Kmax; for(i = 0; i < Nout; i++) { nndist[i] = hugevalue; nnwhich[i] = -1; } /* run through all source points */ #ifdef HUH Rprintf("Run through source points\n"); #endif jfirst = 0; for(i = 0; i < Np; i++) { tpi = tp[i]; m = sp[i]; /* segment containing this point */ slen = seglen[m]; ivleft = from[m]; ivright = 
to[m]; #ifdef HUH Rprintf("Source point %d lies on segment %d = [%d,%d]\n", i, m, ivleft, ivright); #endif deltad = slen * tpi; #ifdef HUH Rprintf("\tComparing to left endpoint %d, distance %lf\n", ivleft, deltad); #endif for(k = 0; k < Kmax; k++) UPDATENN(i, deltad + VDIST(ivleft, k), VWHICH(ivleft, k)); deltad = slen * (1.0 - tpi); #ifdef HUH Rprintf("\tComparing to right endpoint %d, distance %lf\n", ivright, deltad); #endif for(k = 0; k < Kmax; k++) UPDATENN(i, deltad + VDIST(ivright, k), VWHICH(ivright, k)); /* find any target points in this segment */ while(jfirst < Nq && sq[jfirst] < m) jfirst++; jlast = jfirst; while(jlast < Nq && sq[jlast] == m) jlast++; --jlast; /* if there are no such points, then jlast < jfirst */ if(jfirst <= jlast) { for(j = jfirst; j <= jlast; j++) { d = slen * fabs(tq[j] - tpi); UPDATENN(i, d, j); } } } } #undef MAT #undef NNDIST #undef NNWHICH #undef VDIST #undef VWHICH #undef UPDATENN #ifndef CROSS #undef nq #undef Nq #undef sq #undef tq #endif spatstat.linnet/src/linSpairdist.h0000644000176200001440000000600514141460471017015 0ustar liggesusers/* linSpairdist.h Function body definitions with macros (included in linSpairdist.c) Pairwise distances Sparse representation of network $Revision: 1.4 $ $Date: 2020/05/12 03:37:23 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Macros used: FNAME name of function SYMMETRIC whether to exploit symmetry of distance matrix VERBOSE debugging ! Data points must be ordered by segment index ! ! Result must be initialised to zero ! */ void FNAME(np, sp, tp, /* data points (ordered by sp) */ nv, /* number of network vertices */ ns, from, to, /* segments */ seglen, /* segment lengths */ huge, /* value taken as infinity */ tol, /* tolerance for updating distances */ /* OUTPUT */ dist /* matrix of distances from i to j , INITIALISED TO ZERO */ ) int *np, *nv, *ns; int *from, *to, *sp; /* integer vectors (mappings) */ double *tp; /* fractional location coordinates */ double *huge, *tol; double *seglen; double *dist; { int Np, Nv, i, j, ivleft, ivright, spi, spj; double dleft, dright, dij, slen, tpi, tpj; double *dminvert; /* min dist from each vertex */ int one; Np = *np; Nv = *nv; if(Np <= 1) return; one = 1; dminvert = (double *) R_alloc(Nv, sizeof(double)); #ifdef VERBOSE Rprintf("Start loop through target points j\n"); #endif #ifdef SYMMETRIC for(j = 1; j < Np; j++) #else for(j = 0; j < Np; j++) #endif { R_CheckUserInterrupt(); spj = sp[j]; tpj = tp[j]; #ifdef VERBOSE Rprintf("Target point %d\n\t lies on segment %d\n", j, spj); Rprintf("Compute distance to target from each vertex..\n"); #endif /* First compute min distance to target point j from each vertex */ Clinvdist(&one, sp+j, tp+j, nv, ns, from, to, seglen, huge, tol, dminvert); #ifdef VERBOSE Rprintf("Run through source points..\n"); #endif #ifdef SYMMETRIC for(i = 0; i < j; i++) #else for(i = 0; i < Np; i++) #endif { tpi = tp[i]; spi = sp[i]; /* segment containing this point */ slen = seglen[spi]; if(spi == spj) { /* target point lies in the same segment */ dij = slen * fabs(tpj - tpi); #ifdef VERBOSE Rprintf("\tSource %d and target lie on same segment, distance %lf\n", i, dij); #endif } else { ivleft = from[spi]; ivright = to[spi]; #ifdef VERBOSE Rprintf("\tSource point %d lies on segment %d = [%d,%d]\n", i, spi, ivleft, ivright); #endif dleft = slen * tpi + dminvert[ivleft]; dright = slen * (1.0 - tpi) + dminvert[ivright]; dij = (dleft < dright) ? 
dleft : dright; #ifdef VERBOSE Rprintf("\tDistance via left endpoint %d is %lf\n", ivleft, dleft); Rprintf("\tDistance via right endpoint %d is %lf\n", ivright, dright); #endif } #ifdef VERBOSE Rprintf("\tAssigning distance d[%d, %d] = %lf\n", i, j, dij); #endif dist[i + j * Np] = dij; #ifdef SYMMETRIC dist[j + i * Np] = dij; #endif } } } spatstat.linnet/src/linknnd.c0000755000176200001440000000100614141460471015777 0ustar liggesusers#include #include "yesno.h" /* linknnd.c k-th nearest neighbours in a linear network Sparse representation of network ! Data points must be ordered by segment index ! $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef HUH #undef CROSS #define FNAME linknnd #include "linknnd.h" #undef FNAME #define CROSS #define FNAME linknncross #include "linknnd.h" #undef CROSS #undef FNAME spatstat.linnet/src/linvdist.c0000755000176200001440000000113414141460471016200 0ustar liggesusers#include #include "yesno.h" /* linvdist.c Distance function at vertices (shortest distance from each vertex to a data point) Sparse representation of network $Revision: 1.1 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 ! Data points must be ordered by segment index ! */ #undef HUH /* definition of Clinvdist */ #define FNAME Clinvdist #undef WHICH #include "linvdist.h" /* definition of Clinvwhichdist */ #undef FNAME #define FNAME Clinvwhichdist #define WHICH #include "linvdist.h" spatstat.linnet/src/linnncross.c0000755000176200001440000000136314141460471016540 0ustar liggesusers#include /* linnncross.c Shortest-path distances between nearest neighbours in linear network One pattern to another pattern $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ linndcross linndxcross Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define DPATH(I,J) dpath[(I) + Nv * (J)] #define ANSWER(I,J) answer[(I) + Np * (J)] #define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2)) /* definition of linndcross */ #define FNAME linndcross #undef EXCLU #define WHICH #include "linnncross.h" #undef FNAME #undef EXCLU #undef WHICH /* definition of linndxcross */ #define FNAME linndxcross #define EXCLU #define WHICH #include "linnncross.h" spatstat.linnet/src/init.c0000644000176200001440000000363714144334010015306 0ustar liggesusers /* Native symbol registration table for spatstat.linnet package Automatically generated - do not edit this file! */ #include "proto.h" #include #include #include // for NULL #include /* See proto.h for declarations for the native routines registered below. 
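   Because R_forceSymbols(dll, TRUE) is called in R_init_spatstat_linnet
   below, these routines can be invoked from R only through the registered
   symbol objects, not by character name.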
*/ static const R_CMethodDef CEntries[] = { {"Ccountends", (DL_FUNC) &Ccountends, 14}, {"ClineMquad", (DL_FUNC) &ClineMquad, 23}, {"Clinequad", (DL_FUNC) &Clinequad, 18}, {"ClineRMquad", (DL_FUNC) &ClineRMquad, 23}, {"ClineRquad", (DL_FUNC) &ClineRquad, 18}, {"Clinvwhichdist", (DL_FUNC) &Clinvwhichdist, 12}, {"Clixellate", (DL_FUNC) &Clixellate, 16}, {"heatApprox", (DL_FUNC) &heatApprox, 9}, {"lincrossdist", (DL_FUNC) &lincrossdist, 16}, {"linearradius", (DL_FUNC) &linearradius, 8}, {"linknncross", (DL_FUNC) &linknncross, 16}, {"linknnd", (DL_FUNC) &linknnd, 13}, {"linndcross", (DL_FUNC) &linndcross, 18}, {"linndxcross", (DL_FUNC) &linndxcross, 20}, {"linnndist", (DL_FUNC) &linnndist, 13}, {"linnnwhich", (DL_FUNC) &linnnwhich, 14}, {"linpairdist", (DL_FUNC) &linpairdist, 12}, {"linScrossdist", (DL_FUNC) &linScrossdist, 14}, {"linSnndwhich", (DL_FUNC) &linSnndwhich, 15}, {"linSpairdist", (DL_FUNC) &linSpairdist, 11}, {"linSpairUdist", (DL_FUNC) &linSpairUdist, 11}, {"lintileindex", (DL_FUNC) &lintileindex, 9}, {"linvknndist", (DL_FUNC) &linvknndist, 13}, {NULL, NULL, 0} }; static const R_CallMethodDef CallEntries[] = { {"depthrel", (DL_FUNC) &depthrel, 5}, {NULL, NULL, 0} }; void R_init_spatstat_linnet(DllInfo *dll) { R_registerRoutines(dll, CEntries, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); R_forceSymbols(dll, TRUE); } spatstat.linnet/src/lintileindex.c0000644000176200001440000000406214141460471017034 0ustar liggesusers#include #include #include "chunkloop.h" /* lintileindex.c Given a tessellation on a linear network, compute tile index for each query point NOTE: data are assumed to be sorted by segment $Revision: 1.2 $ $Date: 2019/02/06 08:36:02 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void lintileindex(n, seg, tp, /* query points, sorted by segment */ dfn, dfseg, dft0, dft1, dftile, /* tessellation data, sorted */ answer) /* query points */ int *n; /* number of query points */ int *seg; /* which segment contains this query point*/ double *tp; /* position along segment */ /* tessellation pieces */ int *dfn; /* number of pieces */ int *dfseg; /* which segment contains this piece */ double *dft0, *dft1; /* positions of endpoints of this piece */ int *dftile; /* which tile the piece belongs to */ /* output */ int *answer; /* which tile the query point belongs to */ { int N, M, i, start, finish, j, segi, currentseg, maxchunk; double tpi; N = *n; M = *dfn; currentseg = -1; start = finish = 0; /* answer[] is implicitly initialised to zero, which will serve as NA */ OUTERCHUNKLOOP(i, N, maxchunk, 1024) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 1024) { segi = seg[i]; tpi = tp[i]; if(segi > currentseg) { /* advance the bookmark until reaching data for this segment */ while(start < M && dfseg[start] < segi) ++start; if(start == M) { /* Reached end of data for tessellation */ /* All remaining results are NA */ return; } currentseg = dfseg[start]; for(finish = start; finish < M && dfseg[finish] == currentseg; ++finish) ; if(finish == M || dfseg[finish] > currentseg) --finish; } if(currentseg == segi) { for(j = start; j <= finish; ++j) { if(dft0[j] <= tpi && tpi <= dft1[j]) { answer[i] = dftile[j]; break; } } } } } } spatstat.linnet/src/linnncross.h0000755000176200001440000000642714141460471016553 0ustar liggesusers/* linnncross.h Function body definitions with macros $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Macros used: FNAME name of function EXCLU whether serial numbers are provided WHICH whether 
'nnwhich' is required Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void FNAME(np, xp, yp, /* data points 'from' */ nq, xq, yq, /* data points 'to' */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ psegmap, /* map from data points to segments */ qsegmap, /* map from data points to segments */ #ifdef EXCLU idP, idQ, /* serial numbers for patterns p and q */ #endif huge, /* value taken as infinity */ /* OUTPUT */ #ifdef WHICH nndist, /* nearest neighbour distance for each point */ nnwhich /* identifies nearest neighbour */ #else nndist /* nearest neighbour distance for each point */ #endif ) int *np, *nq, *nv, *ns; int *from, *to, *psegmap, *qsegmap; /* integer vectors (mappings) */ #ifdef EXCLU int *idP, *idQ; #endif double *xp, *yp, *xq, *yq, *xv, *yv; /* vectors of coordinates */ double *huge; double *dpath; /* matrix */ double *nndist; /* nearest neighbour distance for each point */ #ifdef WHICH int *nnwhich; /* identifies nearest neighbour */ #endif { int Np, Nq, Nv, i, j; int segPi, segQj, nbi1, nbi2, nbj1, nbj2; double d, xpi, ypi, xqj, yqj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; double dmin, hugevalue; #ifdef EXCLU int idPi; #endif #ifdef WHICH int whichmin; #endif Np = *np; Nq = *nq; Nv = *nv; hugevalue = *huge; /* initialise nn distances */ for(i = 0; i < Np; i++) { nndist[i] = hugevalue; #ifdef WHICH nnwhich[i] = -1; #endif } /* main loop */ for(i = 0; i < Np; i++) { xpi = xp[i]; ypi = yp[i]; #ifdef EXCLU idPi = idP[i]; #endif segPi = psegmap[i]; nbi1 = from[segPi]; nbi2 = to[segPi]; dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); dmin = nndist[i]; #ifdef WHICH whichmin = nnwhich[i]; #endif for(j = 0; j < Nq; j++) { #ifdef EXCLU if(idQ[j] != idPi) { #endif xqj = xq[j]; yqj = yq[j]; segQj = qsegmap[j]; /* compute path distance between i and j */ if(segPi == segQj) { /* points i and j lie on the same segment; use Euclidean distance */ d = EUCLID(xpi, ypi, xqj, yqj); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[segQj]; nbj2 = to[segQj]; d1Xj = EUCLID(xv[nbj1], yv[nbj1], xqj, yqj); d2Xj = EUCLID(xv[nbj2], yv[nbj2], xqj, yqj); d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj; d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj; d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj; d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* OK, distance between i and j is d */ /* update nn for point i */ if(d < dmin) { dmin = d; #ifdef WHICH whichmin = j; #endif } #ifdef EXCLU } #endif } /* commit nn distance for point i */ nndist[i] = dmin; #ifdef WHICH nnwhich[i] = whichmin; #endif } } spatstat.linnet/src/linearradius.c0000755000176200001440000000367014141460471017035 0ustar liggesusers#include #include #include "chunkloop.h" /* linearradius.c Bounding radius in linear network $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define DPATH(I,J) dpath[(J) + Nv * (I)] #include "yesno.h" #undef DEBUG void linearradius(ns, from, to, /* network segments */ lengths, /* segment lengths */ nv, dpath, /* shortest path distances between vertices */ huge, result) int *nv, *ns; int *from, *to; /* integer vectors (mappings) */ double *dpath; /* matrix of shortest path distances between vertices */ double *lengths; /* vector of 
segment lengths */ double *huge; /* very large value */ double *result; { int Nv, Ns; int i, j, A, B, C, D; double AB, AC, AD, BC, BD, CD; double sAij, sBij, sAiMax, sBiMax, smin; int maxchunk; Nv = *nv; Ns = *ns; smin = *huge; OUTERCHUNKLOOP(i, Ns, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ns, maxchunk, 16384) { /* indices of endpoints of segment i */ A = from[i]; B = to[i]; AB = lengths[i]; sAiMax = sBiMax = AB/2.0; for(j = 0; j < Ns; j++) { if(j != i) { /* indices of endpoints of segment i */ C = from[j]; D = to[j]; CD = lengths[j]; AC = DPATH(A,C); AD = DPATH(A,D); BC = DPATH(B,C); BD = DPATH(B,D); /* max dist from A to any point in segment j */ sAij = (AD > AC + CD) ? AC + CD : (AC > AD + CD) ? AD + CD : (AC + AD + CD)/2.0; /* max dist from B to any point in segment j */ sBij = (BD > BC + CD) ? BC + CD : (BC > BD + CD) ? BD + CD : (BC + BD + CD)/2.0; /* row-wise maximum */ if(sAij > sAiMax) sAiMax = sAij; if(sBij > sBiMax) sBiMax = sBij; } } if(sAiMax < smin) smin = sAiMax; if(sBiMax < smin) smin = sBiMax; } } *result = smin; } spatstat.linnet/src/proto.h0000644000176200001440000000713014144334010015503 0ustar liggesusers#include #include /* Prototype declarations for all native routines in spatstat.linnet package Automatically generated - do not edit! */ /* Functions invoked by .C */ void linScrossdist(int *, int *, double *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *); void lincrossdist(int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, int *, double *); void heatApprox(int *, double *, double *, double *, double *, int *, int *, int *, double *); void Ccountends(int *, double *, int *, double *, int *, double *, double *, int *, int *, int *, double *, double *, double *, int *); void Clinequad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, double *, double *, int *, double *, double *, int *); void ClineRquad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, double *, double *, int *, double *, double *, int *); void ClineMquad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, int *, int *, double *, double *, int *); void ClineRMquad(int *, int *, int *, int *, double *, double *, double *, int *, int *, double *, double *, int *, int *, double *, double *, int *, double *, double *, int *, int *, double *, double *, int *); void linearradius(int *, int *, int *, double *, int *, double *, double *, double *); void lintileindex(int *, int *, double *, int *, int *, double *, double *, int *, int *); void Clixellate(int *, int *, int *, int *, int *, int *, double *, double *, int *, double *, int *, int *, int *, double *, int *, double *); void linnndist(int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, double *, double *); void linknnd(int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *); void linnnwhich(int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, double *, double *, int *); void linknnd(int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *); void linknncross(int *, int *, int *, double *, int *, int *, double *, int *, int *, int *, int *, double *, 
double *, double *, double *, int *); void linSnndwhich(int *, int *, double *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *); void linndcross(int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, int *, double *, double *, int *); void linndxcross(int *, double *, double *, int *, double *, double *, int *, double *, double *, int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, int *); void Clinvwhichdist(int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *); void linvknndist(int *, int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, int *); void linSpairUdist(int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *); void linSpairdist(int *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *); void linpairdist(int *, double *, double *, int *, double *, double *, double *, int *, int *, double *, int *, double *); /* Functions invoked by .Call */ SEXP depthrel(SEXP, SEXP, SEXP, SEXP, SEXP); spatstat.linnet/src/yesno.h0000755000176200001440000000011614141460471015505 0ustar liggesusers/* yesno.h */ #ifndef YES #define YES (0 == 0) #define NO (!YES) #endif spatstat.linnet/src/lineardisc.c0000755000176200001440000002067614141460471016475 0ustar liggesusers#include #include #include "chunkloop.h" /* lineardisc.c Disc of radius r in linear network Clineardisc determine the linear disc (NOT USED) Ccountends count the number of endpoints $Revision: 1.14 $ $Date: 2019/11/28 20:53:23 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define DPATH(I,J) dpath[(J) + Nv * (I)] #include "yesno.h" #undef DEBUG #ifdef DEBUG void Clineardisc(f, seg, /* centre of disc (local coords, f = tp) */ r, /* radius of disc */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ lengths, /* segment lengths */ allinside, boundary, dxv, nendpoints) int *nv, *ns; int *from, *to; /* integer vectors (mappings) */ double *f, *r; int *seg; double *xv, *yv; /* vectors of coordinates of vertices */ double *dpath; /* matrix of shortest path distances between vertices */ double *lengths; /* vector of segment lengths */ /* OUTPUTS */ int *allinside, *boundary; /* vectors of status for each segment */ double *dxv; /* vector of distances for each vertex */ int *nendpoints; { int Nv, Ns; double f0, rad; int seg0; int i, A, B, fromi, toi, allin, bdry, reachable, nends, maxchunk; double length0, dxA, dxB, dxAvi, dxBvi, residue; double *resid; int *covered; Nv = *nv; Ns = *ns; f0 = *f; seg0 = *seg; rad = *r; /* endpoints of segment containing centre */ A = from[seg0]; B = to[seg0]; /* distances from x to A and B */ length0 = lengths[seg0]; dxA = f0 * length0; dxB = (1-f0) * length0; /* visit vertices */ covered = (int *) R_alloc((size_t) Nv, sizeof(int)); resid = (double *) R_alloc((size_t) Nv, sizeof(double)); OUTERCHUNKLOOP(i, Nv, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Nv, maxchunk, 16384) { /* distance going through A */ dxAvi = dxA + DPATH(A,i); /* distance going through B */ dxBvi = dxB + DPATH(B,i); /* shortest path distance to this vertex */ dxv[i] = (dxAvi < dxBvi) ? dxAvi : dxBvi; /* distance left to 'spend' from this vertex */ residue = rad - dxv[i]; resid[i] = (residue > 0)? 
residue : 0; /* determine whether vertex i is inside the disc of radius r */ covered[i] = (residue >= 0); } } /* Now visit line segments. */ nends = 0; OUTERCHUNKLOOP(i, Ns, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ns, maxchunk, 16384) { /* Determine which line segments are completely inside the disc, and which cross the boundary. */ if(i == seg0) { /* initial segment: disc starts from centre (x, y) */ allin = covered[A] && covered[B]; bdry = !allin; if(bdry) { if(!covered[A]) nends++; if(!covered[B]) nends++; } } else { /* another segment: disc extends in from either endpoint */ fromi = from[i]; toi = to[i]; reachable = (covered[fromi] || covered[toi]); if(reachable) { allin = covered[fromi] && covered[toi] && (resid[fromi] + resid[toi] >= lengths[i]); bdry = !allin; } else allin = bdry = NO; if(bdry) { if(covered[fromi]) nends++; if(covered[toi]) nends++; } } allinside[i] = allin; boundary[i] = bdry; } } *nendpoints = nends; } #endif /* ------------------------------------------------- */ /* count endpoints of several discs in a network */ /* ------------------------------------------------- */ void Ccountends(np, f, seg, /* centres of discs (local coords) */ r, /* radii of discs */ nv, xv, yv, /* network vertices */ ns, from, to, /* network segments */ dpath, /* shortest path distances between vertices */ lengths, /* segment lengths */ toler, /* tolerance */ nendpoints /* output counts of endpoints */ ) int *np, *nv, *ns; int *from, *to; /* integer vectors (mappings) */ double *f, *r; int *seg; double *xv, *yv; /* vectors of coordinates of vertices */ double *dpath; /* matrix of shortest path distances between vertices */ double *lengths; /* vector of segment lengths */ double *toler; /* tolerance for merging endpoints and vertices */ /* OUTPUT */ int *nendpoints; { int Np, Nv, Ns; double f0, rad; int seg0; int i, m, A, B, fromi, toi, reachable, nends, maxchunk, covfrom, covto, allin; double length0, dxA, dxB, dxAvi, dxBvi, dxvi, residue, resfrom, resto, tol; double *resid; int *covered, *terminal; Np = *np; Nv = *nv; Ns = *ns; tol = *toler; #ifdef DEBUG Rprintf("\nTolerance = %lf\n", tol); #endif covered = (int *) R_alloc((size_t) Nv, sizeof(int)); terminal = (int *) R_alloc((size_t) Nv, sizeof(int)); resid = (double *) R_alloc((size_t) Nv, sizeof(double)); /* loop over centre points */ OUTERCHUNKLOOP(m, Np, maxchunk, 256) { R_CheckUserInterrupt(); INNERCHUNKLOOP(m, Np, maxchunk, 256) { f0 = f[m]; seg0 = seg[m]; rad = r[m]; #ifdef DEBUG Rprintf("\nCentre point %d lies in segment %d\n", m, seg0); #endif /* endpoints of segment containing centre */ A = from[seg0]; B = to[seg0]; /* distances from centre to A and B */ length0 = lengths[seg0]; dxA = f0 * length0; dxB = (1-f0) * length0; #ifdef DEBUG Rprintf("Distances to endpoints: dxA=%lf, dxB=%lf\n", dxA, dxB); #endif nends = 0; /* visit vertices */ for(i = 0; i < Nv; i++) { #ifdef DEBUG Rprintf("\nConsidering vertex %d\n", i); #endif /* distance going through A */ dxAvi = dxA + DPATH(A,i); /* distance going through B */ dxBvi = dxB + DPATH(B,i); /* shortest path distance to this vertex */ dxvi = (dxAvi < dxBvi) ? 
dxAvi : dxBvi; /* distance left to 'spend' from this vertex */ residue = rad - dxvi; #ifdef DEBUG Rprintf("dxAvi = %lf; dxBvi = %lf; residue = %lf\n", dxAvi, dxBvi, residue); #endif if(residue > tol) { resid[i] = residue; covered[i] = YES; terminal[i] = NO; #ifdef DEBUG Rprintf("Vertex is covered\n"); #endif } else if(residue < -tol) { resid[i] = 0; covered[i] = terminal[i] = NO; #ifdef DEBUG Rprintf("Vertex is not covered\n"); #endif } else { /* vertex is within 'tol' of an endpoint - deem it to be one */ resid[i] = 0; covered[i] = terminal[i] = YES; /* vertex is an endpoint of disc */ ++nends; #ifdef DEBUG Rprintf("Vertex is a terminal endpoint\n"); #endif } } #ifdef DEBUG Rprintf("%d terminal endpoints\n", nends); #endif /* Now visit line segments to count any endpoints that are interior to the segments. */ for(i = 0; i < Ns; i++) { /* Determine which line segments are completely inside the disc, and which cross the boundary. */ if(i == seg0) { /* initial segment: disc starts from (x0, y0) */ if(!covered[A]) nends++; if(!covered[B]) nends++; #ifdef DEBUG if(!covered[A]) Rprintf("A not covered\n"); if(!covered[B]) Rprintf("B not covered\n"); #endif } else { /* another segment: disc extends in from either endpoint */ fromi = from[i]; toi = to[i]; covfrom = covered[fromi]; covto = covered[toi]; resfrom = resid[fromi]; resto = resid[toi]; reachable = covfrom || covto; #ifdef DEBUG residue = resfrom + resto - lengths[i]; Rprintf("%d: %s %s: %lf + %lf - %lf = %lf sign %s\n", i, (terminal[fromi]) ? "T" : ((covfrom) ? "Y" : "N"), (terminal[toi]) ? "T" : ((covto) ? "Y" : "N"), resfrom, resto, lengths[i], residue, (residue < 0) ? "-" : ((residue > 0) ? "+" : "0")); #endif if(reachable) { residue = resfrom + resto - lengths[i]; allin = covfrom && covto && (residue >= 0); #ifdef DEBUG if(allin) { Rprintf("Covered\n"); } else if((terminal[fromi] || terminal[toi]) && (residue >= - tol * lengths[i])) { Rprintf("Deemed to be covered\n"); } else Rprintf("Reachable\n"); #endif allin = allin || ((terminal[fromi] || terminal[toi]) && (residue >= - tol)); if(!allin) { /* segment is not entirely covered by disc - infer endpoint(s) in interior of segment */ if(covfrom && !terminal[fromi]) nends++; if(covto && !terminal[toi]) nends++; #ifdef DEBUG if(covfrom && !terminal[fromi]) Rprintf("fromi => end\n"); if(covto && !terminal[toi]) Rprintf("toi => end\n"); #endif } } } } nendpoints[m] = nends; } } } spatstat.linnet/src/linequad.c0000755000176200001440000000122514141460471016147 0ustar liggesusers#include #include #include "yesno.h" /* linequad.c make a quadrature scheme on a linear network Clinequad unmarked pattern ClineMquad multitype pattern $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define SWAP(X,Y,TMP) TMP = Y; Y = X; X = TMP #undef HUH #define FUNNAME Clinequad #define FMKNAME ClineMquad #undef ALEA #include "linequad.h" #undef FUNNAME #undef FMKNAME #define FUNNAME ClineRquad #define FMKNAME ClineRMquad #define ALEA #include "linequad.h" #undef FUNNAME #undef FMKNAME #undef ALEA spatstat.linnet/src/depthrel.c0000644000176200001440000000663014141460471016156 0ustar liggesusers#include #include #include /* depthrel.c Find which segments lie in front of/behind other segments x is the projected x-coordinate z is the negative depth (z increases as we move closer to the viewer) Assumes X0 <= X1 */ #define ROUNDED(X) ((float) (X)) SEXP depthrel(SEXP X0, SEXP Z0, SEXP X1, SEXP Z1, SEXP Verb) { double *x0, 
*z0, *x1, *z1; int i, j, k, m, n, kmax, kmaxnew; int *front, *back; double xleft, xright, dxi, dxj, z0i, z0j, z1i, z1j; char verbose; int status; SEXP out, fout, bout, sout; int *fp, *bp; PROTECT(X0 = AS_NUMERIC(X0)); PROTECT(Z0 = AS_NUMERIC(Z0)); PROTECT(X1 = AS_NUMERIC(X1)); PROTECT(Z1 = AS_NUMERIC(Z1)); PROTECT(Verb = AS_INTEGER(Verb)); x0 = NUMERIC_POINTER(X0); z0 = NUMERIC_POINTER(Z0); x1 = NUMERIC_POINTER(X1); z1 = NUMERIC_POINTER(Z1); verbose = (*(INTEGER_POINTER(Verb)) == 1); n = LENGTH(X0); status = 0; /* initial guess for max number of related pairs */ kmax = 4 * (n+1); /* allocate space for list of related pairs */ k = 0; front = (int *) R_alloc(kmax, sizeof(int)); back = (int *) R_alloc(kmax, sizeof(int)); if(n >= 2) { for(i = 1; i < n; i++) { for(j = 0; j < i; j++) { if(x1[i] > x0[j] && x1[j] > x0[i]) { /* overlap occurs */ /* consider left side */ z0i = z0[i]; z0j = z0[j]; if(x0[j] < x0[i]) { xleft = x0[i]; dxj = x1[j]-x0[j]; if(dxj != 0.0) z0j += (z1[j] - z0j) * ((xleft - x0[j])/dxj); } else { xleft = x0[j]; dxi = x1[i]-x0[i]; if(dxi != 0.0) z0i += (z1[i] - z0i) * ((xleft - x0[i])/dxi); } /* consider right side */ z1i = z1[i]; z1j = z1[j]; if(x1[j] > x1[i]) { xright = x1[i]; dxj = x1[j]-xleft; if(dxj != 0.0) z1j = z0j + (z1j - z0j) * ((xright - xleft)/dxj); } else { xright = x1[j]; dxi = x1[i]-xleft; if(dxi != 0.0) z1i = z0i + (z1i - z0i) * ((xright - xleft)/dxi); } /* now determine which is in front */ if(ROUNDED(z0i) >= ROUNDED(z0j) && ROUNDED(z1i) >= ROUNDED(z1j)) { /* 'i' is in front */ front[k] = i + 1; back[k] = j + 1; } else if(ROUNDED(z0i) <= ROUNDED(z0j) && ROUNDED(z1i) <= ROUNDED(z1j)){ /* 'j' is in front */ front[k] = j + 1; back[k] = i + 1; } else { if(verbose) warning("segments %d and %d cross over", i+1, j+1); status = 1; } k++; if(k >= kmax) { /* storage overflow */ kmaxnew = 2 * kmax; front = (int *) S_realloc((char *) front, kmaxnew, kmax, sizeof(int)); back = (int *) S_realloc((char *) back, kmaxnew, kmax, sizeof(int)); kmax = kmaxnew; } } } } } /* copy to output */ PROTECT(out = NEW_LIST(3)); PROTECT(fout = NEW_INTEGER(k)); PROTECT(bout = NEW_INTEGER(k)); PROTECT(sout = NEW_INTEGER(1)); if(k > 0) { fp = INTEGER_POINTER(fout); bp = INTEGER_POINTER(bout); for(m = 0; m < k; m++) { fp[m] = front[m]; bp[m] = back[m]; } } *(INTEGER_POINTER(sout)) = status; SET_VECTOR_ELT(out, 0, fout); SET_VECTOR_ELT(out, 1, bout); SET_VECTOR_ELT(out, 2, sout); UNPROTECT(9); return(out); } spatstat.linnet/src/chunkloop.h0000755000176200001440000000161514141460471016357 0ustar liggesusers/* chunkloop.h Divide a loop into chunks Convenient for divide-and-recombine, and reducing calls to R_CheckUserInterrupt, etc. 
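   Typical usage pattern (a minimal sketch; 'i', 'n' and 'maxchunk' are
   placeholders for the loop index, loop length and chunk counter, and
   process() stands for the real loop body):

       OUTERCHUNKLOOP(i, n, maxchunk, 16384) {
         R_CheckUserInterrupt();
         INNERCHUNKLOOP(i, n, maxchunk, 16384) {
           process(i);
         }
       }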
$Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define OUTERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \ IVAR = 0; \ ICHUNK = 0; \ while(IVAR < LOOPLENGTH) #define INNERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \ ICHUNK += CHUNKSIZE; \ if(ICHUNK > LOOPLENGTH) ICHUNK = LOOPLENGTH; \ for(; IVAR < ICHUNK; IVAR++) #define XOUTERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE) \ IVAR = ISTART; \ ICHUNK = 0; \ while(IVAR <= IEND) #define XINNERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE) \ ICHUNK += CHUNKSIZE; \ if(ICHUNK > IEND) ICHUNK = IEND; \ for(; IVAR <= IEND; IVAR++) #define CHUNKLOOP_H spatstat.linnet/src/linnndist.c0000755000176200001440000001204514141460471016351 0ustar liggesusers#include /* linnndist.c Shortest-path distances between nearest neighbours in linear network $Revision: 1.2 $ $Date: 2018/12/18 02:43:11 $ linnndist linnnwhich Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define DPATH(I,J) dpath[(J) + Nv * (I)] #define ANSWER(I,J) answer[(J) + Np * (I)] #define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2)) void linnndist(np, xp, yp, /* data points */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ segmap, /* map from data points to segments */ huge, /* value taken as infinity */ /* OUTPUT */ answer /* nearest neighbour distance for each point */ ) int *np, *nv, *ns; int *from, *to, *segmap; /* integer vectors (mappings) */ double *xp, *yp, *xv, *yv; /* vectors of coordinates */ double *huge; double *dpath; /* matrix */ double *answer; /* vector of output values */ { int Np, Nv, i, j, Np1; int segi, segj, nbi1, nbi2, nbj1, nbj2; double d, xpi, ypi, xpj, ypj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; double dmin, hugevalue; Np = *np; Nv = *nv; Np1 = Np - 1; hugevalue = *huge; /* initialise nn distances */ for(i = 0; i < Np; i++) answer[i] = hugevalue; /* main loop */ for(i = 0; i < Np1; i++) { xpi = xp[i]; ypi = yp[i]; segi = segmap[i]; nbi1 = from[segi]; nbi2 = to[segi]; dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); dmin = answer[i]; for(j = i+1; j < Np; j++) { xpj = xp[j]; ypj = yp[j]; segj = segmap[j]; /* compute path distance between i and j */ if(segi == segj) { /* points i and j lie on the same segment; use Euclidean distance */ d = sqrt(pow(xpi - xpj, 2) + pow(ypi - ypj, 2)); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[segj]; nbj2 = to[segj]; d1Xj = EUCLID(xv[nbj1], yv[nbj1], xpj, ypj); d2Xj = EUCLID(xv[nbj2], yv[nbj2], xpj, ypj); d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj; d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj; d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj; d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* OK, distance between i and j is d */ /* update nn distance for point i */ if(d < dmin) dmin = d; /* update nn distance for point j */ if(d < answer[j]) answer[j] = d; } /* commit nn distance for point i */ answer[i] = dmin; } } void linnnwhich(np, xp, yp, /* data points */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ segmap, /* map from data points to segments */ huge, /* value taken as infinity */ /* OUTPUT */ nndist, /* nearest neighbour distance for each point */ 
nnwhich /* identifies nearest neighbour */ ) int *np, *nv, *ns; int *from, *to, *segmap; /* integer vectors (mappings) */ double *xp, *yp, *xv, *yv; /* vectors of coordinates */ double *huge; double *dpath; /* matrix */ double *nndist; /* vector of output values */ int *nnwhich; /* vector of output values */ { int Np, Nv, i, j, Np1; int segi, segj, nbi1, nbi2, nbj1, nbj2; double d, xpi, ypi, xpj, ypj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; double dmin, hugevalue; int whichmin; Np = *np; Nv = *nv; Np1 = Np - 1; hugevalue = *huge; /* initialise nn distances and identifiers */ for(i = 0; i < Np; i++) { nndist[i] = hugevalue; nnwhich[i] = -1; } /* main loop */ for(i = 0; i < Np1; i++) { xpi = xp[i]; ypi = yp[i]; segi = segmap[i]; nbi1 = from[segi]; nbi2 = to[segi]; dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); dmin = nndist[i]; whichmin = nnwhich[i]; for(j = i+1; j < Np; j++) { xpj = xp[j]; ypj = yp[j]; segj = segmap[j]; if(segi == segj) { /* points i and j lie on the same segment; use Euclidean distance */ d = sqrt(pow(xpi - xpj, 2) + pow(ypi - ypj, 2)); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[segj]; nbj2 = to[segj]; d1Xj = EUCLID(xv[nbj1], yv[nbj1], xpj, ypj); d2Xj = EUCLID(xv[nbj2], yv[nbj2], xpj, ypj); d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj; d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj; d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj; d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* OK, distance between i and j is d */ /* update nn for point i */ if(d < dmin) { dmin = d; whichmin = j; } /* update nn for point j */ if(d < nndist[j]) { nndist[j] = d; nnwhich[j] = i; } } /* commit nn for point i */ nndist[i] = dmin; nnwhich[i] = whichmin; } } spatstat.linnet/src/linpairdist.c0000755000176200001440000000445214141460471016674 0ustar liggesusers#include #include #include "chunkloop.h" /* linpairdist.c Shortest-path distances between each pair of points in linear network $Revision: 1.6 $ $Date: 2018/12/18 02:43:11 $ linpairdist Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define DPATH(I,J) dpath[(I) + Nv * (J)] #define ANSWER(I,J) answer[(I) + Np * (J)] #define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2)) void linpairdist(np, xp, yp, /* data points */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ segmap, /* map from data points to segments */ /* OUTPUT */ answer /* shortest path distances between points */ ) int *np, *nv, *ns; int *from, *to, *segmap; /* integer vectors (mappings) */ double *xp, *yp, *xv, *yv; /* vectors of coordinates */ double *dpath, *answer; /* matrices */ { int Np, Nv, i, j, Np1, maxchunk; int segi, segj, nbi1, nbi2, nbj1, nbj2; double d, xpi, ypi, xpj, ypj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; Np = *np; Nv = *nv; Np1 = Np - 1; OUTERCHUNKLOOP(i, Np1, maxchunk, 1024) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Np1, maxchunk, 1024) { xpi = xp[i]; ypi = yp[i]; segi = segmap[i]; nbi1 = from[segi]; nbi2 = to[segi]; dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); for(j = i+1; j < Np; j++) { xpj = xp[j]; ypj = yp[j]; segj = segmap[j]; if(segi == segj) { /* points i and j lie on the same segment; use Euclidean distance */ d = sqrt(pow(xpi - xpj, 2) + pow(ypi - ypj, 2)); } else { /* Shortest path from i to j 
passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[segj]; nbj2 = to[segj]; d1Xj = EUCLID(xv[nbj1], yv[nbj1], xpj, ypj); d2Xj = EUCLID(xv[nbj2], yv[nbj2], xpj, ypj); d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj; d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj; d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj; d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* write */ ANSWER(i,j) = ANSWER(j,i) = d; } ANSWER(i,i) = 0; } } } spatstat.linnet/src/linSnncross.c0000755000176200001440000000144414141460471016663 0ustar liggesusers#include #include "yesno.h" /* linSnncross.c Shortest-path distances between nearest neighbours in linear network One pattern to another pattern $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ 'Sparse version' Works with sparse representation Does not allow 'exclusion' Requires point data to be ordered by segment index. linSnndcross linSnndwhich Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void Clinvdist(), Clinvwhichdist(); /* functions from linvdist.c */ #undef HUH /* definition of linSnndcross */ #define FNAME linSnndcross #undef WHICH #include "linSnncross.h" /* definition of linSnndwhich */ #undef FNAME #define FNAME linSnndwhich #define WHICH #include "linSnncross.h" spatstat.linnet/src/linScrossdist.c0000644000176200001440000000567314141460471017220 0ustar liggesusers#include #include "yesno.h" /* linScrossdist.c Distances between points on a linear network One pattern to another pattern linScrossdist 'Sparse version' $Revision: 1.5 $ $Date: 2020/05/12 03:36:01 $ Works with sparse representation Requires point data to be ordered by segment index. Macros used: VERBOSE debugging ! Data points must be ordered by segment index ! 
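   For each target point j, Clinvdist() first gives the shortest network
   distance from every vertex to j (stored in dminvert). The distance from a
   source point i lying on segment spi is then obtained as follows (a sketch
   of the logic in the loop below, using the names defined in this file,
   where sqj = sq[j] is the segment containing the target):

       if(spi == sqj) {
         dij = seglen[spi] * fabs(tq[j] - tp[i]);
       } else {
         dleft  = seglen[spi] * tp[i]         + dminvert[from[spi]];
         dright = seglen[spi] * (1.0 - tp[i]) + dminvert[to[spi]];
         dij = (dleft < dright) ? dleft : dright;
       }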
Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef VERBOSE void Clinvdist(); /* function from linvdist.c */ void linScrossdist(np, sp, tp, /* data points 'from' (ordered by sp) */ nq, sq, tq, /* data points 'to' (ordered by sq) */ nv, /* number of network vertices */ ns, from, to, /* segments */ seglen, /* segment lengths */ huge, /* value taken as infinity */ tol, /* tolerance for updating distances */ /* OUTPUT */ dist /* matrix of distances from i to j */ ) int *np, *nq, *nv, *ns; int *from, *to, *sp, *sq; /* integer vectors (mappings) */ double *tp, *tq; /* fractional location coordinates */ double *huge, *tol; double *seglen; double *dist; { int Np, Nq, Nv, i, j, ivleft, ivright, spi, sqj; double dleft, dright, dij, slen, tpi, tqj; double *dminvert; /* min dist from each vertex */ int one; Np = *np; Nq = *nq; Nv = *nv; one = 1; dminvert = (double *) R_alloc(Nv, sizeof(double)); #ifdef VERBOSE Rprintf("Start loop through target points j\n"); #endif for(j = 0; j < Nq; j++) { R_CheckUserInterrupt(); sqj = sq[j]; tqj = tq[j]; #ifdef VERBOSE Rprintf("Target point %d\n\t lies on segment %d\n", j, sqj); Rprintf("Compute distance to target from each vertex..\n"); #endif /* First compute min distance to target point j from each vertex */ Clinvdist(&one, sq+j, tq+j, nv, ns, from, to, seglen, huge, tol, dminvert); #ifdef VERBOSE Rprintf("Run through source points..\n"); #endif for(i = 0; i < Np; i++) { tpi = tp[i]; spi = sp[i]; /* segment containing this point */ slen = seglen[spi]; if(spi == sqj) { /* target point lies in the same segment */ dij = slen * fabs(tqj - tpi); #ifdef VERBOSE Rprintf("\tSource %d and target lie on same segment, distance %lf\n", i, dij); #endif } else { ivleft = from[spi]; ivright = to[spi]; #ifdef VERBOSE Rprintf("\tSource point %d lies on segment %d = [%d,%d]\n", i, spi, ivleft, ivright); #endif dleft = slen * tpi + dminvert[ivleft]; dright = slen * (1.0 - tpi) + dminvert[ivright]; dij = (dleft < dright) ? dleft : dright; #ifdef VERBOSE Rprintf("\tDistance to left endpoint %d is %lf\n", ivleft, dleft); Rprintf("\tDistance to right endpoint %d is %lf\n", ivright, dright); #endif } #ifdef VERBOSE Rprintf("\tAssigning distance d[%d, %d] = %lf\n", i, j, dij); #endif dist[i + j * Np] = dij; } } } spatstat.linnet/src/linSpairdist.c0000644000176200001440000000204414141460471017007 0ustar liggesusers#include #include "yesno.h" /* linSpairdist.c Distances between points on a linear network linSpairdist linSpairUdist 'Sparse version' $Revision: 1.2 $ $Date: 2020/03/27 01:22:59 $ Works with sparse representation Requires point data to be ordered by segment index. Repeatedly includes 'linSpairdist.h' Macros used: FNAME function name VERBOSE debugging SYMMETRIC whether to exploit the symmetry of the distance matrix ! Data points must be ordered by segment index ! 
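   When SYMMETRIC is defined, the function body generated from
   linSpairdist.h visits only the pairs with i < j and mirrors each value
   across the diagonal, roughly:

       for(j = 1; j < Np; j++)
         for(i = 0; i < j; i++) {
           ... compute dij ...
           dist[i + j * Np] = dist[j + i * Np] = dij;
         }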
Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #undef VERBOSE #undef SYMMETRIC void Clinvdist(); /* function from linvdist.c */ /* This version computes d[i,j] and d[j,i] separately as a check on validity */ #define FNAME linSpairUdist #include "linSpairdist.h" #undef FNAME /* This is the production version which exploits symmetry d[i,j] = d[j,i] */ #define FNAME linSpairdist #define SYMMETRIC #include "linSpairdist.h" #undef FNAME #undef SYMMETRIC spatstat.linnet/src/linvdist.h0000755000176200001440000000672114141460471016214 0ustar liggesusers/* linvdist.h Distance function at vertices (shortest distance from each vertex to a data point) Function body definitions with macros Sparse representation of network $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Macros used: FNAME name of function WHICH whether 'nnwhich' is required HUH debugging flag ! Data points must be ordered by segment index ! */ void FNAME(np, sp, tp, /* target data points (ordered by sp) */ nv, /* number of network vertices */ ns, from, to, /* segments */ seglen, /* segment lengths */ huge, /* value taken as infinity */ tol, /* tolerance for updating distances */ /* OUTPUT */ #ifdef WHICH dist, /* distance from each vertex to nearest data point */ which /* identifies nearest data point */ #else dist /* distance from each vertex to nearest data point */ #endif ) int *np, *nv, *ns; /* number of points, vertices, segments */ int *sp, *from, *to; /* integer vectors (mappings) */ double *tp; /* fractional location coordinates */ double *huge, *tol; double *seglen; double *dist; #ifdef WHICH int *which; #endif { int Np, Nv, Ns, i, j, k, segPj, ivleft, ivright; double hugevalue, eps, dleft, dright, slen, d, tpj; char converged; Np = *np; Nv = *nv; Ns = *ns; hugevalue = *huge; eps = *tol; #ifdef HUH Rprintf("Initialise dist\n"); #endif /* initialise to huge value */ for(i = 0; i < Nv; i++) { dist[i] = hugevalue; #ifdef WHICH which[i] = -1; #endif } #ifdef HUH Rprintf("Run through target points\n"); #endif /* assign correct value to endpoints of segments containing target points */ for(j = 0; j < Np; j++) { segPj = sp[j]; tpj = tp[j]; slen = seglen[segPj]; ivleft = from[segPj]; d = slen * tpj; if(d < dist[ivleft]) { dist[ivleft] = d; #ifdef WHICH which[ivleft] = j; #endif } ivright = to[segPj]; d = slen * (1.0 - tpj); if(d < dist[ivright]) { dist[ivright] = d; #ifdef WHICH which[ivright] = j; #endif } } /* recursively update */ #ifdef HUH Rprintf("Recursive update\n"); #endif converged = NO; while(!converged) { converged = YES; #ifdef HUH Rprintf("........... starting new pass ...................... 
\n"); #endif for(k = 0; k < Ns; k++) { ivleft = from[k]; ivright = to[k]; slen = seglen[k]; dleft = (double) dist[ivleft]; dright = (double) dist[ivright]; d = (double) (dleft + slen); if(d < dright - eps) { #ifdef HUH Rprintf("Updating ivright=%d using ivleft=%d, from %lf to %lf+%lf=%lf\n", ivright, ivleft, dright, dleft, slen, d); #endif converged = NO; dist[ivright] = d; #ifdef WHICH which[ivright] = which[ivleft]; #endif } else { d = (double) (dright + slen); if(d < dleft - eps) { #ifdef HUH Rprintf("Updating ivleft=%d using ivright=%d, from %lf to %lf+%lf=%lf\n", ivleft, ivright, dleft, dright, slen, d); #endif converged = NO; dist[ivleft] = d; #ifdef WHICH which[ivleft] = which[ivright]; #endif } } } } #ifdef HUH Rprintf("Done\nVertex values:\n"); #ifdef WHICH Rprintf("\ti\twhich\tdist\n"); for(i = 0; i < Nv; i++) Rprintf("\t%d\t%d\t%lf\n", i, which[i], dist[i]); #else Rprintf("\ti\tdist\n"); for(i = 0; i < Nv; i++) Rprintf("\t%d\t%lf\n", i, dist[i]); #endif #endif } spatstat.linnet/src/linequad.h0000755000176200001440000003450114141460471016157 0ustar liggesusers/* linequad.h Template code, #included several times in linequad.c Macros used: FUNNAME function name (unmarked version) FMKNAME function name (marked version) ALEA #defined if grid location should be randomised HUH #defined if debugging is on SWAP swap macro $Revision: 1.3 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void FUNNAME(ns, from, to, nv, xv, yv, eps, ndat, sdat, tdat, wdat, ndum, xdum, ydum, sdum, tdum, wdum, maxscratch) /* A linear network with *ns segments and *nv vertices is specified by the vectors from, to, xv, yv. Data points on the network are specified by *ndat, sdat, tdat. *** Assumed to be sorted in order of 'sdat' ** Dummy points will be placed every 'eps' units along each segment. Output vectors: wdat quadrature weights for the data points wdum quadrature weights for the dummy points xdum, | ydum, | coordinates of dummy points sdum, | tdum | Space must be allocated for sum(ceiling(lengths/eps)) dummy points. 
*/ int *ns; /* number of segments */ int *from, *to; /* endpoints of each segment */ int *nv; /* number of vertices */ double *xv, *yv; /* cartesian coords of vertices */ double *eps; /* desired spacing of dummy points */ int *ndat, *ndum; /* number of data & dummy points */ int *sdat, *sdum; /* segment id (local coordinate) */ double *tdat, *tdum; /* location (local coordinate) */ double *wdat, *wdum; /* quadrature weights */ double *xdum, *ydum; /* spatial coordinates of dummy points */ int *maxscratch; { int Nseg, Ndat, Ndum, Lmax, i, j, k, ll, m, fromi, toi; #ifdef HUH int Nvert; #endif int SegmentForData, nwhole, nentries, npieces, npieces1; double x0, y0, x1, y1, dx, dy; double seglength, ratio, epsilon, rump, epsfrac, rumpfrac, gridstart; double tfirst, tlast, tcurrent, plen, w; int *serial, *count, *pieceid; char *isdata; double *tvalue, *pieceweight; Nseg = *ns; Ndat = *ndat; Ndum = 0; Lmax = *maxscratch; epsilon = *eps; #ifdef HUH Nvert = *nv; Rprintf("Nseg=%d, Nvert=%d, Ndat=d, Lmax = %d\n\n", Nseg, Nvert, Ndat, Lmax); #endif /* allocate scratch space, one for each data/dummy point in current segment */ serial = (int *) R_alloc(Lmax, sizeof(int)); isdata = (char *) R_alloc(Lmax, sizeof(char)); tvalue = (double *) R_alloc(Lmax, sizeof(double)); pieceid = (int *) R_alloc(Lmax, sizeof(int)); /* allocate scratch space, one for each piece of current segment */ count = (int *) R_alloc(Lmax, sizeof(int)); pieceweight = (double *) R_alloc(Lmax, sizeof(double)); /* initialise pointer at start of point pattern Determine which segment contains first point */ k = 0; SegmentForData = (Ndat > 0) ? sdat[0] : -1; #ifdef ALEA GetRNGstate(); #endif /* loop over line segments */ for(i = 0; i < Nseg; i++) { #ifdef HUH Rprintf("Segment %d\n", i); #endif /* endpoints of segment */ fromi = from[i]; toi = to[i]; x0 = xv[fromi]; y0 = yv[fromi]; x1 = xv[toi]; y1 = yv[toi]; dx = x1 - x0; dy = y1 - y0; seglength = sqrt(dx * dx + dy * dy); /* divide segment into pieces of length eps with shorter bits at each end */ ratio = seglength/epsilon; nwhole = (int) floor(ratio); if(nwhole > 2 && ratio - nwhole < 0.5) --nwhole; rump = (seglength - nwhole * epsilon)/2.0; epsfrac = epsilon/seglength; rumpfrac = rump/seglength; /* There are nwhole+2 pieces, with endpoints 0, rumpfrac, rumpfrac+epsfrac, rumpfrac+2*epsfrac, ..., 1-rumpfrac, 1 */ /* Now place dummy points in these pieces */ #ifdef ALEA tfirst = rumpfrac * unif_rand(); #else tfirst = rumpfrac/2.0; #endif #ifdef HUH Rprintf("\tnwhole=%d, epsfrac=%lf, rumpfrac=%lf, tfirst=%lf\n", nwhole, epsfrac, rumpfrac, tfirst); Rprintf("\tsegment length %lf divided into %d pieces\n", seglength, nwhole+2); #endif /* create a new dummy point in each piece */ #ifdef HUH Rprintf("\tMaking left dummy point %d\n", Ndum); #endif tvalue[0] = tfirst; serial[0] = Ndum; isdata[0] = NO; count[0] = 1; pieceid[0] = 0; xdum[Ndum] = x0 + dx * tfirst; ydum[Ndum] = y0 + dy * tfirst; sdum[Ndum] = i; tdum[Ndum] = tfirst; ++Ndum; if(nwhole > 0) { #ifdef HUH Rprintf("\tMaking %d middle dummy points\n", nwhole); #endif #ifdef ALEA gridstart = rumpfrac - unif_rand() * epsfrac; #else gridstart = rumpfrac - epsfrac/2.0; #endif for(j = 1; j <= nwhole; j++) { serial[j] = Ndum; tvalue[j] = tcurrent = gridstart + ((double) j) * epsfrac; isdata[j] = NO; count[j] = 1; pieceid[j] = j; xdum[Ndum] = x0 + dx * tcurrent; ydum[Ndum] = y0 + dy * tcurrent; sdum[Ndum] = i; tdum[Ndum] = tcurrent; ++Ndum; } } j = nwhole + 1; #ifdef HUH Rprintf("\tMaking right dummy point %d\n", Ndum); #endif serial[j] = Ndum; 
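   /* right-hand rump piece: tlast = 1 - tfirst mirrors the left-hand dummy point */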
isdata[j] = NO; tvalue[j] = tlast = 1.0 - tfirst; count[j] = 1; pieceid[j] = j; xdum[Ndum] = x0 + dx * tlast; ydum[Ndum] = y0 + dy * tlast; sdum[Ndum] = i; tdum[Ndum] = tlast; ++Ndum; nentries = npieces = nwhole + 2; npieces1 = npieces-1; /* add any data points lying on current segment i */ while(SegmentForData == i) { #ifdef HUH Rprintf("\tData point %d lies on segment %d\n", k, i); #endif serial[nentries] = k; tvalue[nentries] = tcurrent = tdat[k]; isdata[nentries] = YES; /* determine which piece contains the data point */ ll = (int) ceil((tcurrent - rumpfrac)/epsfrac); if(ll < 0) ll = 0; else if(ll >= npieces) ll = npieces1; #ifdef HUH Rprintf("\tData point %d mapped to piece %d\n", k, ll); #endif count[ll]++; pieceid[nentries] = ll; ++nentries; ++k; SegmentForData = (k < Ndat) ? sdat[k] : -1; } /* compute counting weights for each piece of segment */ #ifdef HUH Rprintf("\tcounting weights..\n"); #endif for(ll = 0; ll < npieces; ll++) { plen = (ll == 0 || ll == npieces1)? rump : epsilon; pieceweight[ll] = plen/count[ll]; } /* apply weights to data/dummy points */ #ifdef HUH Rprintf("\tdistributing weights..\n"); #endif for(j = 0; j < nentries; j++) { m = serial[j]; ll = pieceid[j]; if(ll >= 0 && ll < npieces) { w = pieceweight[ll]; if(isdata[j]) { #ifdef HUH Rprintf("\t\tEntry %d: data point %d, piece %d\n", j, m, ll); #endif wdat[m] = w; } else { #ifdef HUH Rprintf("\t\tEntry %d: dummy point %d, piece %d\n", j, m, ll); #endif wdum[m] = w; } } } } *ndum = Ndum; #ifdef ALEA PutRNGstate(); #endif } void FMKNAME(ns, from, to, nv, xv, yv, eps, ntypes, ndat, xdat, ydat, mdat, sdat, tdat, wdat, ndum, xdum, ydum, mdum, sdum, tdum, wdum, maxscratch) /* A linear network with *ns segments and *nv vertices is specified by the vectors from, to, xv, yv. Data points on the network are specified by *ndat, xdat, ydat, mdat, sdat, tdat. *** Assumed to be sorted in order of 'sdat' ** Dummy points will be placed every 'eps' units along each segment and replicated for each possible mark. Each data point location is also replicated by dummy points with each possible mark except the mark of the data point. Output vectors: wdat quadrature weights for the data points wdum quadrature weights for the dummy points xdum, | ydum, | coordinates of dummy points sdum, | tdum | mdum marks for dummy points Space must be allocated for ntypes * sum(ceiling(lengths/eps)) dummy points. 
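   Marked pieces are indexed as  mpieceid = type + pieceid * Ntypes,
   so with Ntypes = 3, for example, mark 1 on piece 2 maps to marked
   piece 1 + 2 * 3 = 7.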
*/ int *ns; /* number of segments */ int *from, *to; /* endpoints of each segment */ int *nv; /* number of vertices */ double *xv, *yv; /* cartesian coords of vertices */ double *eps; /* desired spacing of dummy points */ int *ndat, *ndum; /* number of data & dummy points */ int *ntypes; /* number of types */ double *xdat, *ydat; /* spatial coordinates of data points */ double *xdum, *ydum; /* spatial coordinates of dummy points */ int *mdat, *mdum; /* mark values */ int *sdat, *sdum; /* segment id (local coordinate) */ double *tdat, *tdum; /* location (local coordinate) */ double *wdat, *wdum; /* quadrature weights */ int *maxscratch; { int Nseg, Ndat, Ndum, Ntypes, Lmax, i, k, ll, m, fromi, toi; #ifdef HUH int Nvert; #endif int SegmentForData, nwhole, nentries, npieces, npieces1, nMpieces; int jpiece, jentry, jpdata, type, mcurrent; double x0, y0, x1, y1, dx, dy, xcurrent, ycurrent; double seglength, ratio, epsilon, rump, epsfrac, rumpfrac, gridstart; double tfirst, tlast, tcurrent, plen, w; int *serial, *count, *mkpieceid; char *isdata; double *tvalue, *countingweight; Nseg = *ns; Ndat = *ndat; Ntypes = *ntypes; Ndum = 0; Lmax = *maxscratch; epsilon = *eps; #ifdef HUH Nvert = *nv; Rprintf("Nseg=%d, Nvert=%d, Ndat=d, Lmax = %d\n\n", Nseg, Nvert, Ndat, Lmax); #endif /* allocate scratch space, one for each data/dummy point in current segment */ serial = (int *) R_alloc(Lmax, sizeof(int)); isdata = (char *) R_alloc(Lmax, sizeof(char)); tvalue = (double *) R_alloc(Lmax, sizeof(double)); mkpieceid = (int *) R_alloc(Lmax, sizeof(int)); /* allocate scratch space, one for each piece of current segment */ count = (int *) R_alloc(Lmax, sizeof(int)); countingweight = (double *) R_alloc(Lmax, sizeof(double)); /* initialise pointer at start of point pattern Determine which segment contains first point */ k = 0; SegmentForData = (Ndat > 0) ? sdat[0] : -1; #ifdef ALEA GetRNGstate(); #endif /* loop over line segments */ for(i = 0; i < Nseg; i++) { #ifdef HUH Rprintf("Segment %d\n", i); #endif /* endpoints of segment */ fromi = from[i]; toi = to[i]; x0 = xv[fromi]; y0 = yv[fromi]; x1 = xv[toi]; y1 = yv[toi]; dx = x1 - x0; dy = y1 - y0; seglength = sqrt(dx * dx + dy * dy); /* divide segment into pieces of length eps with shorter bits at each end */ ratio = seglength/epsilon; nwhole = (int) floor(ratio); if(nwhole > 2 && ratio - nwhole < 0.5) --nwhole; npieces = nwhole + 2; rump = (seglength - nwhole * epsilon)/2.0; epsfrac = epsilon/seglength; rumpfrac = rump/seglength; /* There are nwhole+2 pieces, with endpoints 0, rumpfrac, rumpfrac+epsfrac, rumpfrac+2*epsfrac, ..., 1-rumpfrac, 1 */ /* Now place dummy points in these pieces */ #ifdef ALEA tfirst = rumpfrac * unif_rand(); gridstart = rumpfrac - epsfrac * unif_rand(); #else tfirst = rumpfrac/2.0; gridstart = rumpfrac - epsfrac/2.0; #endif tlast = 1.0 - tfirst; #ifdef HUH Rprintf("\tnwhole=%d, epsfrac=%lf, rumpfrac=%lf, tfirst=%lf\n", nwhole, epsfrac, rumpfrac, tfirst); Rprintf("\tsegment length %lf divided into %d pieces\n", seglength, npieces); #endif /* 'Marked pieces' of segment are numbered in order (piece 0, mark 0), (piece 0, mark 1), ..., (piece 0, mark Ntypes-1), (piece 1, mark 0), ..... mpieceid = type + pieceid * Ntypes */ #ifdef HUH Rprintf("\tMaking %d x %d = %d dummy points\n", npieces, Ntypes, npieces * Ntypes); #endif /* create a new dummy point in each piece */ npieces1 = npieces-1; for(jpiece = 0; jpiece < npieces; jpiece++) { tcurrent = (jpiece == 0) ? tfirst : (jpiece == npieces1) ? 
tlast : (gridstart + ((double) jpiece) * epsfrac); xcurrent = x0 + dx * tcurrent; ycurrent = y0 + dy * tcurrent; for(type = 0; type < Ntypes; type++) { /* position in list of relevant data/dummy points */ jentry = type + jpiece * Ntypes; /* serial number of marked piece */ ll = jentry; tvalue[jentry] = tcurrent; serial[jentry] = Ndum; isdata[jentry] = NO; mkpieceid[jentry] = ll; count[ll] = 1; xdum[Ndum] = xcurrent; ydum[Ndum] = ycurrent; mdum[Ndum] = type; sdum[Ndum] = i; tdum[Ndum] = tcurrent; ++Ndum; } } nentries = npieces * Ntypes; /* handle any data points lying on current segment i */ while(SegmentForData == i) { #ifdef HUH Rprintf("\tData point %d lies on segment %d\n", k, i); #endif xcurrent = xdat[k]; ycurrent = ydat[k]; tcurrent = tdat[k]; mcurrent = mdat[k]; /* determine which piece contains the data point */ jpdata = (int) ceil((tcurrent - rumpfrac)/epsfrac); if(jpdata < 0) jpdata = 0; else if(jpdata >= npieces) jpdata = npieces1; #ifdef HUH Rprintf("\tData point %d falls in piece %d\n", k, jpdata); #endif /* copy data point, and create dummy points at same location with different marks */ for(type = 0; type < Ntypes; type++) { tvalue[nentries] = tcurrent; ll = type + jpdata * Ntypes; mkpieceid[nentries] = ll; count[ll]++; if(type == mcurrent) { /* data point */ isdata[nentries] = YES; serial[nentries] = k; } else { /* create dummy point */ isdata[nentries] = NO; serial[nentries] = Ndum; xdum[Ndum] = xcurrent; ydum[Ndum] = ycurrent; mdum[Ndum] = type; sdum[Ndum] = i; tdum[Ndum] = tcurrent; ++Ndum; } ++nentries; } ++k; SegmentForData = (k < Ndat) ? sdat[k] : -1; } /* compute counting weights for each piece of segment */ #ifdef HUH Rprintf("\tcounting weights..\n"); #endif for(jpiece = 0; jpiece < npieces; jpiece++) { plen = (jpiece == 0 || jpiece == npieces1)? 
rump : epsilon; for(type = 0; type < Ntypes; type++) { ll = type + jpiece * Ntypes; countingweight[ll] = plen/count[ll]; } } /* apply weights to data/dummy points */ #ifdef HUH Rprintf("\tdistributing weights..\n"); #endif nMpieces = npieces * Ntypes; for(jentry = 0; jentry < nentries; jentry++) { m = serial[jentry]; ll = mkpieceid[jentry]; if(ll >= 0 && ll < nMpieces) { w = countingweight[ll]; if(isdata[jentry]) { #ifdef HUH Rprintf("\t\tEntry %d: data point %d, piece %d\n", jentry, m, ll); #endif wdat[m] = w; } else { #ifdef HUH Rprintf("\t\tEntry %d: dummy point %d, piece %d\n", jentry, m, ll); #endif wdum[m] = w; } } } } *ndum = Ndum; #ifdef ALEA PutRNGstate(); #endif } spatstat.linnet/src/lincrossdist.c0000755000176200001440000000473714141460471017100 0ustar liggesusers#include #include #include "chunkloop.h" /* lincrossdist.c Shortest-path distances between pairs of points in linear network $Revision: 1.4 $ $Date: 2018/12/18 02:43:11 $ lincrossdist Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ #define DPATH(I,J) dpath[(I) + Nv * (J)] #define ANSWER(I,J) answer[(I) + Np * (J)] #define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2)) void lincrossdist(np, xp, yp, /* data points from which distances are measured */ nq, xq, yq, /* data points to which distances are measured */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ psegmap, /* map from data points to segments */ qsegmap, /* map from data points to segments */ /* OUTPUT */ answer /* shortest path distances between points */ ) int *np, *nq, *nv, *ns; int *from, *to, *psegmap, *qsegmap; /* integer vectors (mappings) */ double *xp, *yp, *xq, *yq, *xv, *yv; /* vectors of coordinates */ double *dpath, *answer; /* matrices */ { int Np, Nq, Nv, i, j, maxchunk; int Psegi, Qsegj, nbi1, nbi2, nbj1, nbj2; double xpi, ypi, xqj, yqj; double d, dPiV1, dPiV2, dV1Qj, dV2Qj, d11, d12, d21, d22; Np = *np; Nq = *nq; Nv = *nv; OUTERCHUNKLOOP(i, Np, maxchunk, 1024) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Np, maxchunk, 1024) { xpi = xp[i]; ypi = yp[i]; Psegi = psegmap[i]; nbi1 = from[Psegi]; nbi2 = to[Psegi]; dPiV1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dPiV2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); for(j = 0; j < Nq; j++) { xqj = xq[j]; yqj = yq[j]; Qsegj = qsegmap[j]; if(Psegi == Qsegj) { /* points i and j lie on the same segment; use Euclidean distance */ d = sqrt(pow(xpi - xqj, 2) + pow(ypi - yqj, 2)); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[Qsegj]; nbj2 = to[Qsegj]; dV1Qj = EUCLID(xv[nbj1], yv[nbj1], xqj, yqj); dV2Qj = EUCLID(xv[nbj2], yv[nbj2], xqj, yqj); d11 = dPiV1 + DPATH(nbi1,nbj1) + dV1Qj; d12 = dPiV1 + DPATH(nbi1,nbj2) + dV2Qj; d21 = dPiV2 + DPATH(nbi2,nbj1) + dV1Qj; d22 = dPiV2 + DPATH(nbi2,nbj2) + dV2Qj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* write */ ANSWER(i,j) = d; } } } } spatstat.linnet/src/lixel.c0000755000176200001440000001015114141460471015460 0ustar liggesusers#include #include /* lixel.c divide a linear network into shorter segments Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 */ void Clixellate(ns, fromcoarse, tocoarse, fromfine, tofine, nv, xv, yv, svcoarse, tvcoarse, nsplit, np, spcoarse, tpcoarse, spfine, tpfine) /* A linear network with *ns segments and *nv vertices is specified by the vectors from, to, 
xv, yv. The i-th segment will be subdivided into nsplit[i] subsegments. New data will be added at the end of the vectors 'xv' and 'yv' representing additional vertices in the new network. The point pattern data (*np points with local coordinates sp, tp in the coarse network) will be mapped to the new 'fine' network. Points are sorted by 'spcoarse' value. 'xv', 'yv', 'svcoarse', 'tvcoarse' must each have space for (nv + sum(nsplit-1)) entries. 'fromfine', 'tofine' must have length = sum(nsplit). */ int *ns; /* number of segments (input & output) */ int *fromcoarse, *tocoarse; /* endpoints of each segment (input) */ int *fromfine, *tofine; /* endpoints of each segment (output) */ int *nv; /* number of vertices (input & output) */ double *xv, *yv; /* cartesian coords of vertices (input & output) */ int *svcoarse; /* segment id of new vertex in COARSE network */ double *tvcoarse; /* location coordinate of new vertex on COARSE network */ int *nsplit; /* number of pieces into which each segment should be split */ int *np; /* number of data points */ double *tpcoarse, *tpfine; /* location coordinate */ int *spcoarse, *spfine; /* segment id coordinate */ { int Np, oldNs, oldNv, i, j, k, ll; int oldfromi, oldtoi, newlines, newNv, newNs, SegmentForData; double xstart, xend, ystart, yend, xincr, yincr, tn, tpk; Np = *np; newNv = oldNv = *nv; oldNs = *ns; newNs = 0; /* initialise pointer at start of point pattern Determine which segment contains first point */ k = 0; SegmentForData = (Np > 0) ? spcoarse[0] : -1; /* loop over line segments in original network */ for(i = 0; i < oldNs; i++) { newlines = nsplit[i]; oldfromi = fromcoarse[i]; oldtoi = tocoarse[i]; /* local coordinates of endpoints of segment, in ***coarse*** network */ svcoarse[oldfromi] = svcoarse[oldtoi] = i; tvcoarse[oldfromi] = 0.0; tvcoarse[oldtoi] = 1.0; if(newlines == 1) { /* copy existing segment to new segment list */ fromfine[newNs] = oldfromi; tofine[newNs] = oldtoi; /* advance pointer */ ++newNs; } else if(newlines > 1) { /* split segment into 'newlines' pieces */ xstart = xv[oldfromi]; ystart = yv[oldfromi]; xend = xv[oldtoi]; yend = yv[oldtoi]; xincr = (xend-xstart)/newlines; yincr = (yend-ystart)/newlines; for(j = 1; j < newlines; j++) { /* create new vertex, number 'newNv' */ xv[newNv] = xstart + j * xincr; yv[newNv] = ystart + j * yincr; /* local coordinates of new vertex relative to ***coarse*** network */ svcoarse[newNv] = i; tvcoarse[newNv] = ((double) j)/((double) newlines); /* create new segment, number 'newNs', ending at new vertex */ fromfine[newNs] = (j == 1) ? oldfromi : (newNv-1); tofine[newNs] = newNv; /* advance */ ++newNv; ++newNs; } /* create segment from last added vertex to end of old segment */ fromfine[newNs] = newNv-1; tofine[newNs] = oldtoi; ++newNs; } /* handle any data points lying on current segment i */ while(SegmentForData == i) { if(newlines == 1) { spfine[k] = spcoarse[k]; tpfine[k] = tpcoarse[k]; } else { /* map location on coarse segment to fine segment */ tn = tpcoarse[k] * newlines; ll = (int) floor(tn); ll = (ll < 0) ? 0 : (ll >= newlines) ? (newlines - 1): ll; tpk = tn - ll; if(tpk < 0.0) tpk = 0.0; else if(tpk > 1.0) tpk = 1.0; tpfine[k] = tpk; spfine[k] = newNs - newlines + ll; } /* advance to next data point */ ++k; SegmentForData = (k < Np) ? 
spcoarse[k] : -1; } } *nv = newNv; *ns = newNs; } spatstat.linnet/src/linSnncross.h0000755000176200001440000000661314141460471016673 0ustar liggesusers/* linSnncross.h Function body definitions with macros Sparse representation of network $Revision: 1.5 $ $Date: 2018/12/18 02:43:11 $ Copyright (C) Adrian Baddeley, Ege Rubak and Rolf Turner 2001-2018 Licence: GNU Public Licence >= 2 Macros used: FNAME name of function WHICH whether 'nnwhich' is required HUH debugging ! Data points must be ordered by segment index ! */ void FNAME(np, sp, tp, /* data points 'from' (ordered by sp) */ nq, sq, tq, /* data points 'to' (ordered by sq) */ nv, /* number of network vertices */ ns, from, to, /* segments */ seglen, /* segment lengths */ huge, /* value taken as infinity */ tol, /* tolerance for updating distances */ /* OUTPUT */ #ifdef WHICH nndist, /* nearest neighbour distance for each point */ nnwhich /* identifies nearest neighbour */ #else nndist /* nearest neighbour distance for each point */ #endif ) int *np, *nq, *nv, *ns; int *from, *to, *sp, *sq; /* integer vectors (mappings) */ double *tp, *tq; /* fractional location coordinates */ double *huge, *tol; double *seglen; double *nndist; /* nearest neighbour distance for each point */ #ifdef WHICH int *nnwhich; /* identifies nearest neighbour */ #endif { int Np, Nq, Nv, i, j, ivleft, ivright, jfirst, jlast, k; double d, hugevalue, slen, tpi; double *dminvert; /* min dist from each vertex */ #ifdef WHICH int *whichvert; /* which min from each vertex */ #endif Np = *np; Nq = *nq; Nv = *nv; hugevalue = *huge; /* First compute min distance to target set from each vertex */ dminvert = (double *) R_alloc(Nv, sizeof(double)); #ifdef WHICH whichvert = (int *) R_alloc(Nv, sizeof(int)); Clinvwhichdist(nq, sq, tq, nv, ns, from, to, seglen, huge, tol, dminvert, whichvert); #else Clinvdist(nq, sq, tq, nv, ns, from, to, seglen, huge, tol, dminvert); #endif #ifdef HUH Rprintf("Initialise answer\n"); #endif /* initialise nn distances from source points */ for(i = 0; i < Np; i++) { nndist[i] = hugevalue; #ifdef WHICH nnwhich[i] = -1; #endif } /* run through all source points */ #ifdef HUH Rprintf("Run through source points\n"); #endif jfirst = 0; for(i = 0; i < Np; i++) { tpi = tp[i]; k = sp[i]; /* segment containing this point */ slen = seglen[k]; ivleft = from[k]; ivright = to[k]; #ifdef HUH Rprintf("Source point %d lies on segment %d = [%d,%d]\n", i, k, ivleft, ivright); #endif d = slen * tpi + dminvert[ivleft]; if(nndist[i] > d) { #ifdef HUH Rprintf("\tMapping to left endpoint %d, distance %lf\n", ivleft, d); #endif nndist[i] = d; #ifdef WHICH nnwhich[i] = whichvert[ivleft]; #endif } d = slen * (1.0 - tpi) + dminvert[ivright]; if(nndist[i] > d) { #ifdef HUH Rprintf("\tMapping to right endpoint %d, distance %lf\n", ivright, d); #endif nndist[i] = d; #ifdef WHICH nnwhich[i] = whichvert[ivright]; #endif } /* find any target points in this segment */ while(jfirst < Nq && sq[jfirst] < k) jfirst++; jlast = jfirst; while(jlast < Nq && sq[jlast] == k) jlast++; --jlast; /* if there are no such points, then jlast < jfirst */ if(jfirst <= jlast) { for(j = jfirst; j <= jlast; j++) { d = slen * fabs(tq[j] - tpi); if(nndist[i] > d) { nndist[i] = d; #ifdef WHICH nnwhich[i] = j; #endif } } } } } spatstat.linnet/NEWS0000644000176200001440000001311714155070040014103 0ustar liggesusers CHANGES IN spatstat.linnet VERSION 2.3-1 OVERVIEW o More control over resolution of 'linim' objects. o Improved documentation. o Minor improvements and bug fixes. 
SIGNIFICANT USER-VISIBLE CHANGES o as.linim.default, as.linim.linfun New argument 'nd' o integral.linfun New argument 'nd' o rjitterlpp, rjitter.lpp The function 'rjitterlpp' has been renamed 'rjitter.lpp' and is now a method for the generic 'rjitter'. o rjitterlpp This function still exists, but is now deprecated in favour of 'rjitter.lpp'. BUG FIXES o rjitterlpp The argument 'radius' was interpreted as a fraction of segment length rather than an absolute distance. Fixed. CHANGES IN spatstat.linnet VERSION 2.3-0 OVERVIEW o We thank Suman Rakshit, Greg McSwiggan and Marc Schneble for contributions. o Sufficient Dimension Reduction on a linear network. o Perspective-view plots of linfun and linim objects. o Bug fix to Math.linim o Minor additions. NEW FUNCTIONS o sdr.lpp Sufficient Dimension Reduction on a linear network. [Contributed by Suman Rakshit.] o persp.linfun, persp.linim Perspective-view plots of functions on a linear network (class linim) and images on a linear network (class linfun). [Written by Adrian Baddeley and Greg McSwiggan.] SIGNIFICANT USER-VISIBLE CHANGES o response.lppm Method for 'response' for fitted point process models on a network. BUG FIXES o Math.linim If 'Z' was a pixel image on a network (class 'linim') and 'v' was a single number, then 'Z/v' was a pixel image (class 'im') instead of a pixel image on a network (class 'linim'). [Spotted by Marc Schneble.] Fixed. o bw.voronoi Printed output stated that the cross-validation criterion was minimised instead of maximised. CHANGES IN spatstat.linnet VERSION 2.2-1 OVERVIEW o Minor changes to satisfy CRAN. CHANGES IN spatstat.linnet VERSION 2.2-0 OVERVIEW o Extensions to rhohat.lpp and rhohat.lppm. o Internal bug fixes. SIGNIFICANT USER-VISIBLE CHANGES o rhohat.lpp, rhohat.lppm New option (smoother='piecewise') computes a piecewise-constant estimate of rho(z). o rhohat.lpp, rhohat.lppm The result now includes the 'average' intensity rho. CHANGES IN spatstat.linnet VERSION 2.1-1 OVERVIEW o Tweak to satisfy the package checker. CHANGES IN spatstat.linnet VERSION 2.1-0 OVERVIEW o We thank Andrea Gilardi for contributions. o Bug fix in density.lpp o Improvement to densityEqualSplit o New function for jittering point patterns on a network. o Function renamed. NEW FUNCTIONS o rjitterlpp Apply random displacements to the points on a linear network. o densityHeat.lpp Diffusion kernel estimation of intensity on a linear network. (formerly known as 'densityHeatlpp' and 'densityHeat', this is now a method for the new generic 'densityHeat') SIGNIFICANT USER-VISIBLE CHANGES o densityEqualSplit New arguments 'at' and 'leaveoneout' for consistency with other functions. o densityHeat The function formerly known as 'densityHeat' or 'densityHeatlpp' is now renamed 'densityHeat.lpp' and is a method for the generic 'densityHeat'. o density.lpp Accelerated when the pattern contains duplicated points. BUG FIXES o density.lpp The result had the wrong length if 'x' contained duplicated points when 'weights' were given and 'at="points"'. [Spotted by Andrea Gilardi] Fixed. CHANGES IN spatstat.linnet VERSION 2.0-0 OVERVIEW o We thank Andrea Gilardi for contributions. o Function renamed. o Bug fix in density.lpp SIGNIFICANT USER-VISIBLE CHANGES o densityHeat 'densityHeat' has been temporarily renamed 'densityHeatlpp', to prevent errors in the package checking process. o [.linim Accelerated. BUG FIXES o deviance.lppm, pseudoR2.lppm Results were completely incorrect, due to a coding error. Fixed. 
o density.lpp Crashed if 'weights' were given and 'x' contained duplicated points. [Spotted by Andrea Gilardi] Fixed. o Lcross.inhom, Kcross.inhom, Kmulti.inhom The option 'correction="none"' was accepted but ignored. [Spotted by Corey Anderson.] Fixed. o simulate.rhohat Crashed when applied to rhohat objects computed from data on a linear network. Fixed. CHANGES IN spatstat.linnet VERSION 1.65-9 OVERVIEW o Minor internal changes. CHANGES IN spatstat.linnet VERSION 1.65-8 OVERVIEW o Function renamed. SIGNIFICANT USER-VISIBLE CHANGES o densityHeat 'densityHeat' has been renamed 'densityHeat.lpp'. CHANGES IN spatstat.linnet VERSION 1.65-7 OVERVIEW o Tweaks to satisfy CRAN. CHANGES IN spatstat.linnet VERSION 1.65-1 OVERVIEW o Added NEWS file. CHANGES IN spatstat.linnet VERSION 1.65-0 OVERVIEW o Package initialised at version 1.65-0 SIGNIFICANT USER-VISIBLE CHANGES o spatstat.linnet The package 'spatstat.linnet' has been created from a subset of the code in the original 'spatstat' package version 1.65-0. It contains the functionality for statistical analysis of spatial data on a linear network. For an overview, see help("spatstat.linnet-package") o Execution The 'spatstat.linnet' package is slightly faster than the corresponding code in the 'spatstat' package, because the procedure for calling internal C functions has been streamlined. spatstat.linnet/R/0000755000176200001440000000000014141460471013610 5ustar liggesusersspatstat.linnet/R/lixellate.R0000644000176200001440000000610314144334007015714 0ustar liggesusers#' #' lixellate.R #' #' Divide each segment of a linear network into several pieces #' #' $Revision: 1.11 $ $Date: 2021/01/07 03:53:27 $ #' lixellate <- function(X, ..., nsplit, eps, sparse=TRUE) { missn <- missing(nsplit) || (length(nsplit) == 0) misse <- missing(eps) || (length(eps) == 0) if(missn && misse) stop("One of the arguments 'nsplit' or 'eps' must be given") if(!missn && !misse) stop("The arguments 'nsplit' or 'eps' are incompatible") if(!missn) { stopifnot(is.numeric(nsplit)) stopifnot(all(is.finite(nsplit))) stopifnot(all(nsplit >= 1)) if(!all(nsplit == as.integer(nsplit))) stop("nsplit should be an integer or vector of integers", call.=FALSE) } else { check.1.real(eps) stopifnot(eps > 0) } if(is.lpp(X)) { rtype <- "lpp" np <- npoints(X) L <- as.linnet(X) } else if(inherits(X, "linnet")) { rtype <- "linnet" L <- X X <- runiflpp(1, L) np <- 0 } else stop("X should be a linnet or lpp object") if(is.null(sparse)) sparse <- identical(L$sparse, TRUE) from <- L$from to <- L$to ns <- length(from) if(missn) { lenfs <- lengths_psp(as.psp(L)) nsplit <- pmax(ceiling(lenfs/eps), 1L) } else { if(length(nsplit) == 1) { nsplit <- rep(nsplit, ns) } else if(length(nsplit) != ns) { stop(paste("nsplit should be a single number,", "or a vector of length equal to the number of segments")) } } sumN <- sum(nsplit) sumN1 <- sum(nsplit-1) V <- vertices(L) nv <- npoints(V) xv <- V$x yv <- V$y coordsX <- coords(X) sp <- coordsX$seg tp <- coordsX$tp ## sort data in increasing order of 'sp' oo <- order(sp) z <- .C(SL_Clixellate, ns=as.integer(ns), fromcoarse=as.integer(from-1L), tocoarse = as.integer(to-1L), fromfine=as.integer(integer(sumN)), tofine = as.integer(integer(sumN)), nv = as.integer(nv), xv = as.double(c(xv, numeric(sumN1))), yv = as.double(c(yv, numeric(sumN1))), svcoarse = as.integer(integer(nv + sumN1)), tvcoarse = as.double(numeric(nv + sumN1)), nsplit = as.integer(nsplit), np = as.integer(np), spcoarse = as.integer(sp[oo]-1L), tpcoarse = as.double(tp[oo]), spfine = 
as.integer(integer(np)), tpfine = as.double(numeric(np)), PACKAGE="spatstat.linnet") Lfine <- with(z, { ii <- seq_len(nv) Vnew <- ppp(xv[ii], yv[ii], window=Frame(L), check=FALSE) Lfine <- linnet(Vnew, edges=cbind(fromfine,tofine)+1L, sparse=sparse) marks(Lfine$vertices) <- markcbind(marks(Lfine$vertices), data.frame(segcoarse=svcoarse+1L, tpcoarse=tvcoarse)) Lfine }) if(rtype == "linnet") return(Lfine) ## put coordinates back in original order sp[oo] <- as.integer(z$spfine + 1L) tp[oo] <- z$tpfine coordsX$seg <- sp coordsX$tp <- tp ## make lpp Xfine <- lpp(coordsX, Lfine) marks(Xfine) <- marks(X) return(Xfine) } spatstat.linnet/R/bermanlpp.R0000644000176200001440000000350014144334007015707 0ustar liggesusers# # bermanlpp.R # # Tests from Berman (1986) adapted to linear networks # # $Revision: 1.2 $ $Date: 2020/06/17 04:36:06 $ # # berman.test.lpp <- function(X, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) { Xname <- short.deparse(substitute(X)) covname <- short.deparse(substitute(covariate)) force(covariate) if(is.character(covariate)) covname <- covariate which <- match.arg(which) alternative <- match.arg(alternative) model <- lppm(X) dont.complain.about(model) do.call(bermantestEngine, resolve.defaults(list(quote(model), quote(covariate), which, alternative), list(...), list(modelname="CSR", covname=covname, dataname=Xname))) } berman.test.lppm <- function(model, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) { modelname <- short.deparse(substitute(model)) covname <- short.deparse(substitute(covariate)) force(model) force(covariate) if(is.character(covariate)) covname <- covariate verifyclass(model, "lppm") which <- match.arg(which) alternative <- match.arg(alternative) if(is.poisson(model) && is.stationary(model)) modelname <- "CSR" do.call(bermantestEngine, resolve.defaults(list(quote(model), quote(covariate), which, alternative), list(...), list(modelname=modelname, covname=covname, dataname=model$Xname))) } spatstat.linnet/R/sdr.R0000644000176200001440000000446514141460471014534 0ustar liggesusers#' Sufficient Dimension Reduction #' including linear network case sdr.lpp <- local({ sdr.lpp <- function(X, covariates, method=c("DR", "NNIR", "SAVE", "SIR", "TSE"), Dim1=1, Dim2=1, predict=FALSE, ...) { stopifnot(is.ppp(X) || is.lpp(X)) method <- match.arg(method) trap.extra.arguments(...) 
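  #' Illustrative use only (Z1, Z2 below are hypothetical covariate images
  #' defined on the same network as X; they are not part of this file):
  #'     fit <- sdr.lpp(X, covariates=list(Z1=Z1, Z2=Z2), method="DR", Dim1=1)
  #'     fit$B   # columns span the estimated dimension-reduction subspace
  #' The returned matrix B combines the covariates into Dim1 (plus Dim2, for
  #' method="TSE") linear indices through which the intensity is taken to
  #' depend on the covariates.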
#' ensure 'covariates' is a list of compatible images if(!inherits(covariates, "imlist") && !all(sapply(covariates, is.im))) stop("Argument 'covariates' must be a list of images") nc <- length(covariates) if(nc == 0) stop("Need at least one covariate!") if(nc < Dim1 + (method == "TSE") * Dim2) stop(paste(if(method == "TSE") "Dim1 + Dim2" else "Dim1", "must not exceed the number of covariates"), call.=FALSE) #' extract "all" values covariates <- as.solist(covariates) Ypixval <- pairs(covariates, plot=FALSE) Ypixval <- Ypixval[complete.cases(Ypixval), , drop=FALSE] #' compute sample mean and covariance matrix m <- colMeans(Ypixval) V <- cov(Ypixval) #' evaluate each image at point data locations YX <- sapply(covariates, safelook, Y=X) #' apply precomputed standardisation Zx <- t(t(YX) - m) %*% matrixinvsqrt(V) #' ready coordsX <- coords(X) result <- switch(method, DR = calc.DR(COV=V, z=Zx, Dim=Dim1), NNIR = calc.NNIR(COV=V, z=Zx, pos=coordsX, Dim=Dim1), SAVE = calc.SAVE(COV=V, z=Zx, Dim=Dim1), SIR = calc.SIR(COV=V, z=Zx ), TSE = calc.TSE(COV=V, z=Zx, pos=coordsX, Dim1=Dim1, Dim2=Dim2) ) #' covnames <- names(covariates) %orifnull% paste0("Y", 1:nc) dimnames(result$B) <- list(covnames, paste0("B", 1:ncol(result$B))) if(method == "TSE") { result$M1 <- namez(result$M1) result$M2 <- namez(result$M2) } else { result$M <- namez(result$M) } if(predict) result$Y <- sdrPredict(covariates, result$B) return(result) } safelook <- function(Z, Y, ...) { if(is.im(Z)) safelookup(Z, Y, ...) else Z[Y] } namez <- function(M, prefix="Z") { dimnames(M) <- list(paste0(prefix, 1:nrow(M)), paste0(prefix, 1:ncol(M))) return(M) } sdr.lpp }) spatstat.linnet/R/density.lpp.R0000644000176200001440000007315314144334010016205 0ustar liggesusers#' #' density.lpp.R #' #' Method for 'density' for lpp objects #' #' Copyright (C) 2017-2020 Greg McSwiggan and Adrian Baddeley #' density.lpp <- function(x, sigma=NULL, ..., weights=NULL, distance=c("path", "euclidean"), continuous=TRUE, kernel="gaussian") { stopifnot(inherits(x, "lpp")) distance <- match.arg(distance) weights <- pointweights(x, weights=weights, parent=parent.frame()) if(distance == "euclidean") { #' Euclidean 2D kernel ans <- densityQuick.lpp(x, sigma, ..., kernel=kernel, weights=weights) } else { #' kernel is 1-D kernel <- match.kernel(kernel) if(continuous && (kernel == "gaussian")) { #' equal-split continuous with Gaussian kernel: use heat equation ans <- densityHeat.lpp(x, sigma, ..., weights=weights) } else { ##' Okabe-Sugihara equal-split method ans <- densityEqualSplit(x, sigma, ..., kernel=kernel, weights=weights) } } return(ans) } density.splitppx <- function(x, sigma=NULL, ...) { if(!all(sapply(x, is.lpp))) stop("Only implemented for patterns on a linear network") solapply(x, density.lpp, sigma=sigma, ...) 
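  #' (Each component pattern is smoothed independently, giving a 'solist' of
  #' 'linim' objects, one per pattern in the list. Illustrative call, using
  #' the standard 'chicago' data with an arbitrary bandwidth:
  #'     density(split(chicago), sigma=100)
  #' The bandwidth value above is purely for illustration.)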
} densityEqualSplit <- function(x, sigma=NULL, ..., at=c("pixels", "points"), leaveoneout=TRUE, weights=NULL, kernel="epanechnikov", continuous=TRUE, epsilon=1e-6, verbose=TRUE, debug=FALSE, savehistory=TRUE) { ## Based on original code by Adrian Baddeley and Greg McSwiggan 2014-2016 check.1.real(sigma) at <- match.arg(at) L <- as.linnet(x) leaveoneout <- leaveoneout && (at == "points") # weights np <- npoints(x) if(is.null(weights)) { weights <- rep(1, np) } else { stopifnot(is.numeric(weights)) check.nvector(weights, np, oneok=TRUE) if(length(weights) == 1L) weights <- rep(weights, np) } ## infinite bandwidth if(bandwidth.is.infinite(sigma)) { out <- switch(at, pixels = as.linim(flatdensityfunlpp(x, weights=weights)), points = flatdensityatpointslpp(x, weights=weights, leaveoneout=leaveoneout)) attr(out, "sigma") <- sigma return(out) } #' collapse duplicates efficiently if(leaveoneout) { collapsed <- FALSE } else { umap <- uniquemap(x) ii <- seq_len(npoints(x)) uniek <- (umap == ii) if(collapsed <- !all(uniek)) { x <- x[uniek] weights <- if(is.null(weights)) { as.numeric(table(umap)) } else { tapplysum(weights, list(factor(umap))) } } } ## Extract local coordinates of data n <- npoints(x) coo <- coords(x) seg <- coo$seg tp <- coo$tp # lengths of network segments Llines <- as.psp(L) Llengths <- lengths_psp(Llines) ## set up query locations switch(at, pixels = { ## pixellate linear network linemask <- as.mask.psp(Llines, ...) lineimage <- as.im(linemask, value=0) ## extract pixel centres xx <- raster.x(linemask) yy <- raster.y(linemask) mm <- linemask$m xx <- as.vector(xx[mm]) yy <- as.vector(yy[mm]) pixelcentres <- ppp(xx, yy, window=as.rectangle(linemask), check=FALSE) pixdf <- data.frame(xc=xx, yc=yy) nquery <- nrow(pixdf) ## project pixel centres onto lines p2s <- project2segment(pixelcentres, Llines) projloc <- as.data.frame(p2s$Xproj) projmap <- as.data.frame(p2s[c("mapXY", "tp")]) projdata <- cbind(pixdf, projloc, projmap) }, points = { projmap <- data.frame(mapXY=seg, tp=tp) nquery <- n }) # initialise density values at query locations values <- rep(0, nquery) # initialise stack stack <- data.frame(seg=integer(0), from=logical(0), distance=numeric(0), weight=numeric(0), origin=integer(0), generation=integer(0)) # process each data point for(i in seq_len(n)) { segi <- seg[i] tpi <- tp[i] len <- Llengths[segi] # evaluate kernel on segment containing x[i] relevant <- (projmap$mapXY == segi) if(leaveoneout) relevant[i] <- FALSE values[relevant] <- values[relevant] + dkernel(len * (projmap$tp[relevant] - tpi), kernel=kernel, sd=sigma) # push the two tails onto the stack stack <- rbind(data.frame(seg = c(segi, segi), from = c(TRUE, FALSE), distance = len * c(tpi, 1-tpi), weight = rep(weights[i], 2L), origin = rep(i, 2L), generation = rep(1L, 2)), stack) } Lfrom <- L$from Lto <- L$to if(verbose) niter <- 0 if(savehistory) history <- data.frame(iter=integer(0), qlen=integer(0), totmass=numeric(0), maxmass=numeric(0)) lastgen <- resolve.1.default(list(lastgen=Inf), list(...)) sortgen <- resolve.1.default(list(sortgen=FALSE), list(...)) sortgen <- sortgen || is.finite(lastgen) ## process the stack while(nrow(stack) > 0) { if(debug) print(stack) masses <- with(stack, abs(weight) * pkernel(distance, kernel=kernel, sd=sigma, lower.tail=FALSE)) totmass <- sum(masses) maxmass <- max(masses) if(savehistory) history <- rbind(history, data.frame(iter=nrow(history)+1L, qlen=nrow(stack), totmass=totmass, maxmass=maxmass)) if(verbose) { niter <- niter + 1L cat(paste("Iteration", niter, "\tStack 
length", nrow(stack), "\n")) cat(paste("Total stack mass", totmass, "\tMaximum", maxmass, "\n")) } # trim tiny <- (masses < epsilon) if(any(tiny)) { if(verbose) { ntiny <- sum(tiny) cat(paste("Removing", ntiny, "tiny", ngettext(ntiny, "tail", "tails"), "\n")) } stack <- stack[!tiny, ] } if(nrow(stack) == 0) break; # pop the top of the stack H <- stack[1L, , drop=FALSE] stack <- stack[-1L, , drop=FALSE] # segment and vertex Hseg <- H$seg Hvert <- if(H$from) Lfrom[Hseg] else Lto[Hseg] Hdist <- H$distance Hgen <- H$generation Horigin <- H$origin ## finished processing? if(Hgen > lastgen) break; # find all segments incident to this vertex incident <- which((Lfrom == Hvert) | (Lto == Hvert)) degree <- length(incident) # exclude reflecting paths? if(!continuous) incident <- setdiff(incident, Hseg) for(J in incident) { lenJ <- Llengths[J] # determine whether Hvert is the 'to' or 'from' endpoint of segment J H.is.from <- (Lfrom[J] == Hvert) # update weight if(continuous) { Jweight <- H$weight * (2/degree - (J == Hseg)) } else { Jweight <- H$weight/(degree-1) } # increment density on segment relevant <- (projmap$mapXY == J) if(leaveoneout) relevant[Horigin] <- FALSE tp.rel <- projmap$tp[relevant] d.rel <- lenJ * (if(H.is.from) tp.rel else (1 - tp.rel)) values[relevant] <- values[relevant] + Jweight * dkernel(d.rel + Hdist, kernel=kernel, sd=sigma) # push other end of segment onto stack stack <- rbind(data.frame(seg = J, from = !(H.is.from), distance = lenJ + Hdist, weight = Jweight, origin = Horigin, generation = Hgen + 1L), stack) if(sortgen) stack <- stack[order(stack$generation), , drop=FALSE] if(verbose) print(stack) } } switch(at, points = { if(!collapsed) { out <- values } else { ## reinstate full sequence out <- numeric(n) out[uniek] <- values out <- out[umap] } }, pixels = { ## attach values to nearest pixels Z <- lineimage Z[pixelcentres] <- values ## attach exact line position data df <- cbind(projdata, values) out <- linim(L, Z, df=df) }) attr(out, "sigma") <- sigma if(savehistory) attr(out, "history") <- history return(out) } densityHeat.lpp <- function(x, sigma, ..., at=c("pixels", "points"), leaveoneout=TRUE, weights=NULL, dx=NULL, dt=NULL, iterMax=1e6, finespacing=TRUE, verbose=FALSE) { stopifnot(is.lpp(x)) check.1.real(sigma) at <- match.arg(at) if(!is.null(weights)) check.nvector(weights, npoints(x)) ## internal arguments fun <- resolve.1.default(list(fun=FALSE), list(...)) if(bandwidth.is.infinite(sigma)) { out <- switch(at, pixels = { if(fun) flatdensityfunlpp(x, weights=weights) else as.linim(flatdensityfunlpp(x, weights=weights)) }, points = flatdensityatpointslpp(x, weights=weights, leaveoneout=leaveoneout)) attr(out, "sigma") <- sigma return(out) } if(at == "points") { out <- densitypointsLPP(x, sigma, ..., leaveoneout=leaveoneout, weights=weights, nsigma=1, dx=dx, dt=dt, iterMax=iterMax, finespacing=finespacing, verbose=verbose) attr(out, "sigma") <- sigma return(out) } #' collapse duplicates efficiently umap <- uniquemap(x) ii <- seq_len(npoints(x)) uniek <- (umap == ii) if(all(uniek)) { x <- x[uniek] weights <- if(is.null(weights)) { as.numeric(table(umap)) } else { tapplysum(weights, list(factor(umap))) } } ## ## determine algorithm parameters L <- as.linnet(x) p <- resolve.heat.steps(sigma, dx=dx, dt=dt, iterMax=iterMax, L=L, finespacing=finespacing, ..., verbose=verbose) ## go a <- FDMKERNEL(lppobj=x, dtx=p$dx, dtt=p$dt, M=p$niter, weights=weights, stepnames=list(time="dt", space="dx"), verbose=verbose) f <- a$kernel_fun if(fun) { result <- f } else if(!finespacing) { 
if(verbose) cat("Computing pixel image... ") result <- as.linim(f, ...) if(verbose) cat("Done.\n") } else { if(verbose) cat("Computing pixel image... ") Z <- as.im(as.linim(f, ...)) if(verbose) cat("Saving data at sample points... ") df <- a$df colnames(df)[colnames(df) == "seg"] <- "mapXY" ij <- nearest.valid.pixel(df$x, df$y, Z) xy <- data.frame(xc = Z$xcol[ij$col], yc = Z$yrow[ij$row]) df <- cbind(xy, df) result <- linim(domain(f), Z, restrict=FALSE, df=df) if(verbose) cat("Done.\n") } attr(result, "sigma") <- sigma attr(result, "dx") <- a$deltax attr(result, "dt") <- a$deltat return(result) } FDMKERNEL <- function(lppobj, dtt, dtx, M, nsave=1, weights=NULL, stepnames=list(time="dtt", space="dtx"), setuponly=FALSE, verbose=FALSE) { ## Copyright (c) Greg McSwiggan and Adrian Baddeley 2016-2020 ## Based on original code by Greg McSwiggan 2015-2016 ## Internal code: parameters are now assumed to be valid. ## Validation code is now in 'resolve.heat.steps()' net2 <- as.linnet(lppobj) npts <- npoints(lppobj) if(verbose) cat("Subdividing network ...") lenfs <- lengths_psp(as.psp(net2)) seg_in_lengths <- pmax(1, round(lenfs/dtx)) new_lpp <- lixellate(lppobj, nsplit=seg_in_lengths) net_nodes <- as.linnet(new_lpp) nvert <- nvertices(net_nodes) if(verbose) { cat("Done.", fill=TRUE) splat("New network:") print(net_nodes) cat("Constructing update matrix A ..") } alpha <- dtt/(dtx^2) A <- net_nodes$m * alpha diag(A) <- 1 - colSums(A) if(verbose) { cat("Done.", fill=TRUE) splat("alpha = ", alpha) cat("Building initial state ..") } if(npts == 0) { ff <- factor(integer(0), levels=seq_len(nvert)) ww <- numeric(0) U0 <- numeric(nvert) } else { tp1 <- as.numeric(new_lpp$data$tp) tp2 <- as.vector(rbind(1 - tp1, tp1)) newseg <- as.integer(new_lpp$data$seg) vert_init_events1 <- as.vector(rbind(net_nodes$from[newseg], net_nodes$to[newseg])) ff <- factor(vert_init_events1, levels=seq_len(nvert)) ww <- if(is.null(weights)) tp2 else (rep(weights, each=2) * tp2) ww <- ww/dtx U0 <- tapplysum(ww, list(ff)) } if(verbose) cat("Done.", fill=TRUE) if(setuponly) { out <- list(linnet_obj = net_nodes, lixelmap = ff, lixelweight = ww, Amatrix = A, U0 = U0, deltax = dtx, deltat = dtt) return(out) } if(nsave == 1) { blockstart <- 1 blockend <- M } else { blocksize <- ceiling(M/nsave) blockend <- pmin(blocksize * seq_len(nsave), M) blockstart <- c(1L, blockend[-nsave]) } blocklength <- blockend - blockstart + 1L elapsedtime <- blockend * dtt if(verbose) cat("Running iterative solver ..") U <- matrix(0, nvert, nsave) if(npts > 0) { currentU <- U0 for(i in 1:nsave) { v <- currentU nit <- blocklength[i] for(j in 1:nit) v <- A %*% v U[,i] <- currentU <- as.numeric(v) } } finalU <- U[,ncol(U)] if(verbose) { cat("Done.", fill=TRUE) cat("Mapping results to spatial location ..") } vert_new <- as.data.frame(vertices(net_nodes))[,c("x","y","segcoarse","tpcoarse")] colnames(vert_new) <- c("x", "y", "seg", "tp") Nodes <- lpp(vert_new, net2, check=FALSE) nodemap <- nnfun(Nodes) interpUxyst <- function(x, y, seg, tp) { finalU[nodemap(x,y,seg,tp)] } interpU <- linfun(interpUxyst, net2) df <- cbind(vert_new, data.frame(values=finalU)) if(nsave > 1) { interpUxystK <- function(x, y, seg, tp, k) { nono <- nodemap(x,y,seg,tp) if(missing(k)) U[nono, ] else U[nono, k] } interpUK <- linfun(interpUxystK, net2) } else interpUK <- NULL if(verbose) cat("Done.", fill=TRUE) out <- list(kernel_fun = interpU, elapsedtime = elapsedtime, tau = sqrt(2 * elapsedtime), df = df, deltax = dtx, deltat = dtt, progressfun = interpUK) return(out) } resolve.heat.steps 
<- function(sigma, ..., ## main parameters (all are optional) ## A=adjustable by code, F=fixed, A*=adjustable only if allow.adjust=TRUE dx=NULL, # spacing of sample points (A) dt=NULL, # time step (A) niter=NULL, # number of iterations (A*) iterMax=1e6, # maximum number of iterations (can be Inf) (F) nsave=1, # number of time points for which data should be saved (F) # nsave = Inf means save all iterations, nsave = niter ## network information seglengths=NULL, # lengths of network edges maxdegree=NULL, # maximum vertex degree AMbound=NULL, # Anderson-Morley bound L=NULL, # optional linear network from which to extract data ## rules finespacing=TRUE, # if FALSE, use spacing implied by pixel resolution # if TRUE, use finer spacing fineNsplit=30, # finespacing rule average number of pieces per edge fineNlixels=100, # finespacing rule total number of pieces W=NULL, eps=NULL, dimyx=NULL, xy=NULL, # pixel resolution allow.adjust=TRUE, # 'niter' can be changed warn.adjust=verbose, verbose=TRUE, stepnames=list(time="dt", space="dx")) { ## Based on original code by Greg McSwiggan 2015-2016 check.1.real(sigma) # infinite sigma is allowed check.1.real(nsave) # infinite 'nsave' is allowed (will be reset to niter) if(is.finite(nsave)) check.1.integer(nsave) stopifnot(nsave >= 1) dx.given <- !is.null(dx) && check.1.real(dx) dt.given <- !is.null(dt) && check.1.real(dt) niter.given <- !is.null(niter) && check.1.integer(niter) nsave.given <- (nsave > 1) obey.nsave <- nsave.given && is.finite(nsave) save.all <- is.infinite(nsave) one <- 1 + .Machine$double.eps # tolerance for comparisons if(verbose) { if(dx.given) splat("Given: dx =", dx) if(dt.given) splat("Given: dt =", dx) if(niter.given) splat("Given: niter =", niter) if(nsave.given) splat("Given: nsave =", nsave) } ## ---------- CHARACTERISTICS OF NETWORK ------------------ if(is.null(L)) { check.1.integer(maxdegree) check.1.integer(AMbound) } else { L <- as.linnet(L) if(is.null(seglengths)) seglengths <- lengths_psp(as.psp(L)) if(is.null(maxdegree) || is.null(AMbound)) { verdeg <- vertexdegree(L) maxdegree <- max(verdeg) AMbound <- max(verdeg[L$from] + verdeg[L$to]) } if(is.null(W)) W <- Frame(L) } ## segment lengths nseg <- length(seglengths) lmin <- min(seglengths[seglengths > 0]) lbar <- mean(seglengths[seglengths > 0]) ltot <- sum(seglengths) if(verbose) { splat(" Network:") splat(" total length =", ltot) splat(" number of edges =", nseg) splat(" average nonzero edge length = ", lbar) splat(" shortest nonzero edge length = ", lmin) } ## ----------- NUMBER OF ITERATIONS --------------------------------- if(niter.given) { if(verbose) splat(" Validating niter =", niter) stopifnot(niter >= 10) stopifnot(niter <= iterMax) if(save.all) { nsave <- niter } else if(obey.nsave && ( (niter < nsave) || (niter %% nsave != 0) )) { if(!allow.adjust) stop(paste("niter =", niter, "is not a multiple of nsave =", nsave), call.=FALSE) niterOLD <- niter niter <- nsave * max(1L, floor(as.double(niter)/nsave)) if(warn.adjust || verbose) { comment <- paste("niter was adjusted from", niterOLD, "to", niter, "to ensure it is a multiple of nsave =", nsave) if(warn.adjust) warning(comment, call.=FALSE) if(verbose) splat(comment) } } } ## ----------- TIME STEP dt ------ (if given) ----------------------- if(niter.given) { if(verbose) splat(" Determining dt from niter") dtOLD <- dt dt <- sigma^2/(2 * niter) if(dt.given) { if(!allow.adjust) stop("Only one of the arguments dt and niter should be given", call.=FALSE) if(warn.adjust || verbose) { quibble <- paste("Time step dt was 
adjusted from", dtOLD, "to sigma^2/(2 * niter) =", sigma^2, "/", 2 * niter, "=", dt) if(warn.adjust) warning(quibble, call.=FALSE) if(verbose) splat(quibble) } } else if(verbose) splat(" dt =", dt) } else if(dt.given) { if(verbose) splat(" Determining niter from dt", if(obey.nsave) "and nsave" else NULL) stepratio <- sigma^2/(2 * dt) niter <- if(save.all) max(1L, round(stepratio)) else nsave * max(1L, round(stepratio/nsave)) if(niter > iterMax) { problem <- paste("Time step dt =", dt, "implies number of iterations =", niter, "exceeds maximum iterMax =", iterMax) if(!allow.adjust) stop(paste0(problem, "; increase dt or increase iterMax"), call.=FALSE) niter <- iterMax if(obey.nsave) niter <- nsave * max(1L, floor(as.double(niter)/nsave)) if(save.all) nsave <- niter dt <- sigma^2/(2 * niter) if(warn.adjust || verbose) { comment <- paste0(problem, "; niter reduced to iterMax and dt increased to ", dt) if(warn.adjust) warning(comment, call.=FALSE) if(verbose) splat(comment) } } if(verbose) { splat(" niter =", niter) splat(" nsave =", nsave) } } ## check dt satisfies basic constraint if((dt.known <- dt.given || niter.given)) { if(verbose) splat(" Validating dt") dtmax <- sigma^2/(2 * 10) if(finespacing) { dxmax <- lmin/3 dtmax <- min(dtmax, 0.95 * (dxmax^2)/AMbound, sigma * dxmax/6) } niterMin <- max(1L, round(sigma^2/(2 * dtmax))) if(niterMin > iterMax) stop(paste("Minimum number of iterations required is", niterMin, "which exceeds iterMax =", iterMax, "; increase iterMax or reduce sigma"), call.=FALSE) if(dt > dtmax) { #' allow rounding error really <- (dt > dtmax * one) dtOLD <- dt dt <- dtmax if(really) { gripe <- paste("Time step dt =", dtOLD, if(allow.adjust) "reduced to" else "exceeds", "maximum permitted value =", dtmax) if(!allow.adjust) stop(gripe, call.=FALSE) if(warn.adjust) warning(gripe, call.=FALSE) if(verbose) splat(gripe) if(niter.given) { niter <- max(1L, round(sigma^2/(2 * dt))) if(obey.nsave) niter <- nsave * max(1L, floor(as.double(niter)/nsave)) comment <- paste("niter adjusted to", niter) if(warn.adjust) warning(comment, call.=FALSE) if(verbose) splat(comment) if(save.all) { nsave <- niter if(verbose) splat(" nsave = niter =", nsave) } } } } } #' ------------- SPACING OF SAMPLE POINTS, dx --------------- if(dx.given) { if(verbose) splat(" Validating dx =", dx) check.finite(dx) stopifnot(dx > 0) if(finespacing && dx > lmin/3) stop(paste("dx must not exceed (shortest nonzero edge length)/3 =", signif(lmin/3, 6), "when finespacing=TRUE"), call.=FALSE) } else if(dt.known) { ## determine dx from dt if(verbose) splat(" Determine dx from dt") dx <- max(6 * dt/sigma^2, sqrt(dt * AMbound/0.95)) if(verbose) splat(" dx =", dx) } else { #' default rule if(verbose) splat(" Determine dx by default rule") dx <- min(lbar/fineNsplit, ltot/fineNlixels, lmin/3) if(verbose) { splat(" Mean Nonzero Edge Length/", fineNsplit, "=", lbar/fineNsplit) splat(" Total Network Length/", fineNlixels, "=", ltot/fineNlixels) splat(" Min Nonzero Edge Length/3 = ", lmin/3) splat(" Minimum of the above =", dx) } if(!finespacing && is.owin(W)) { W <- Frame(W) #' allow coarser spacing, determined by pixel size eps <- if(!is.null(eps)) min(eps) else if(!is.null(dimyx)) min(sidelengths(W)/rev(dimyx)) else if(!is.null(xy)) with(as.mask(W, xy=xy), min(xstep, ystep)) else min(sidelengths(W)/spatstat.options("npixel")) dx <- max(dx, eps/1.4) if(verbose) { splat(" Pixel size/1.4 =", eps/1.4) splat(" Coarse spacing rule: dx =", dx) } } else if(verbose) splat("Fine spacing rule: dx =", dx) nlixels <- ceiling(ltot/dx) 
nlixels <- min(nlixels, .Machine$integer.max) dx <- ltot/nlixels if(verbose) { splat(" Rounded total number of lixels =", nlixels) splat(" Adjusted dx =", dx) } } #' ------------- TIME STEP dt ---------------------------------- dtmax <- min(0.95 * (dx^2)/AMbound, sigma^2/(2 * 10), sigma * dx/6) if(verbose) splat(" Applying full set of constraints") if(!dt.known) { dt <- dtmax if(verbose) splat(" dt (determined by all constraints) = ", dt) } else if(dt > dtmax) { really <- (dt > dtmax * one) dtOLD <- dt dt <- dtmax if(really) { gripe <- paste("Time step dt =", dtOLD, if(allow.adjust) "reduced to" else "exceeds", "maximum permitted value =", dtmax) if(!allow.adjust) stop(gripe, call.=FALSE) if(warn.adjust) warning(gripe, call.=FALSE) if(verbose) splat(gripe) if(niter.given) { niter <- max(1L, round(sigma^2/(2 * dt))) if(obey.nsave) niter <- nsave * max(1L, floor(as.double(niter)/nsave)) comment <- paste("niter adjusted to", niter) if(warn.adjust) warning(comment, call.=FALSE) if(verbose) splat(comment) if(save.all) { nsave <- niter if(verbose) splat(" nsave = niter =", nsave) } } } } #' finally determine the number of iterations, if not already done. if(is.null(niter)) { niter <- if(save.all) max(1L, round(sigma^2/(2 * dt))) else nsave * max(1L, round(sigma^2/(nsave * 2 * dt))) dt <- sigma^2/(2 * niter) if(verbose) { splat(" Number of iterations", paren(paste0("determined from dt", if(obey.nsave) " and nsave" else NULL)), "=", niter) splat(" Updated dt =", dt) } if(save.all) { nsave <- niter if(verbose) splat(" nsave = niter =", nsave) } } if(niter > iterMax) stop(paste("Required number of iterations =", niter, "exceeds iterMax =", iterMax, "; either increase iterMax, dx, dt or reduce sigma"), call.=FALSE) alpha <- dt/dx^2 if(verbose) splat(" alpha =", alpha) if(1 - maxdegree * alpha < 0) stop(paste0("Algorithm is unstable: alpha = ", stepnames[["time"]], "/", stepnames[["space"]], "^2 = ", alpha, " does not satisfy (maxdegree * alpha <= 1)", " where maxdegree = highest vertex degree = ", maxdegree, "; decrease time step ", stepnames[["time"]], ", or increase spacing ", stepnames[["space"]]), call.=FALSE) if(verbose) { splat(" Final values:") splat(" Time step dt = ", dt) splat(" Sample spacing dx = ", dx) splat(" Number of iterations niter = ", niter) splat(" Number of states saved nsave = ", nsave) } return(list(dt=dt, dx=dx, niter=niter, nsave=nsave)) } flatdensityfunlpp <- function(X, ..., disconnect=TRUE, weights=NULL, what=c("estimate", "var", "se")) { stopifnot(is.lpp(X)) trap.extra.arguments(...) 
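  #' 'Flat' intensity estimate used when the bandwidth is infinite:
  #' the estimate is constant, equal to (total weight)/(network length),
  #' either over the whole network (disconnect=FALSE) or separately on each
  #' connected component (disconnect=TRUE). For example, a component of
  #' length 25 containing 5 unit-weight points receives the constant value
  #' 5/25 = 0.2, with variance 5/25^2 (the numbers are purely illustrative).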
what <- match.arg(what) L <- domain(X) nX <- npoints(X) if(is.null(weights)) { weights <- rep(1, nX) } else check.nvector(weights, nX) if(!disconnect) { #' constant intensity across entire network num <- sum(weights) vol <- volume(L) value <- switch(what, estimate = num/vol, var = num/vol^2, se = sqrt(num)/vol) fff <- function(x, y, seg, tp) { rep(value, length(x)) } } else { #' divide L into connected components and assign each vertex to a component vlab <- connected(L, what="labels") vlab <- factor(vlab) #' assign each segment to a component slab <- vlab[L$from] #' total length of each component slen <- lengths_psp(as.psp(L)) lenY <- tapplysum(slen, list(slab)) #' assign points of X to components xlab <- slab[coords(X)$seg] wY <- tapplysum(weights, list(xlab)) #' intensity of X in each component valY <- switch(what, estimate = wY/lenY, var = wY/lenY^2, se = sqrt(wY)/lenY) #' function returning intensity estimate on relevant component fff <- function(x, y, seg, tp) { valY[ slab[seg] ] } } result <- linfun(fff, L) return(result) } flatdensityatpointslpp <- function(X, ..., leaveoneout=TRUE, disconnect=TRUE, weights=NULL, what=c("estimate", "var", "se")) { stopifnot(is.lpp(X)) trap.extra.arguments(...) what <- match.arg(what) L <- domain(X) nX <- npoints(X) if(nX == 0) return(numeric(0)) if(is.null(weights)) { weights <- rep(1, nX) } else check.nvector(weights, nX) if(!disconnect) { #' constant intensity across entire network totlen <- volume(L) numX <- rep(sum(weights), nX) if(leaveoneout) numX <- numX - weights valX <- switch(what, estimate = numX/totlen, var = numX/totlen^2, se = sqrt(numX)/totlen) } else { #' divide L into connected components and assign each vertex to a component vlab <- connected(L, what="labels") vlab <- factor(vlab) #' assign each segment to a component slab <- vlab[L$from] #' total length of each component slen <- lengths_psp(as.psp(L)) lenY <- tapplysum(slen, list(slab)) #' assign points of X to components Xlab <- slab[coords(X)$seg] #' number of points in each component (or total weight in each component) sumY <- tapplysum(weights, list(Xlab)) #' look up relevant values for each point of X numX <- sumY[ Xlab ] lenX <- lenY[ Xlab ] #' subtract contribution from point itself if(leaveoneout) numX <- numX - weights #' intensity in each component valX <- switch(what, estimate = numX/lenX, var = numX/lenX^2, se = sqrt(numX)/lenX) } return(valX) } spatstat.linnet/R/heatapprox.R0000644000176200001440000000230014144334007016077 0ustar liggesusers#' #' heatapprox.R #' #' Approximation to the heat kernel kappa(u,u) on a network, #' using only paths on the current segment. 
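#'
#' heatkernelapprox(X, sigma) returns one value per point of X: an
#' approximation to kappa_sigma(x_i, x_i), computed in C by summing a
#' finite number (nmax) of path contributions confined to the segment
#' containing x_i, taking account of the degrees of the segment's endpoints.
#' With floored=TRUE the value is bounded below by 1/volume(L), the
#' large-bandwidth limit on a connected network.
#' Illustrative call (arbitrary bandwidth):
#'     heatkernelapprox(runiflpp(20, simplenet), sigma=0.1)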
#' #' Copyright (c) Greg McSwiggan and Adrian Baddeley 2017-2020 #' #' $Revision: 1.4 $ $Date: 2021/01/07 03:54:20 $ #' heatkernelapprox <- function(X, sigma, nmax=20, floored=TRUE) { stopifnot(is.lpp(X)) nX <- npoints(X) if(nX == 0) return(numeric(0)) check.nvector(sigma, nX, oneok=TRUE) stopifnot(all(sigma > 0)) if(length(sigma) == 1) sigma <- rep(sigma, nX) check.1.integer(nmax) lenf <- lengths_psp(as.psp(domain(X))) coo <- coords(X) seg <- coo$seg len <- lenf[seg] pos <- len * coo$tp L <- domain(X) vv <- vertexdegree(L) dleft <- vv[L$from[seg]] dright <- vv[L$to[seg]] z <- .C(SL_heatApprox, n = as.integer(nX), a = as.double(len), x = as.double(pos), y = as.double(pos), #sic s = as.double(sigma), degl = as.integer(dleft), degr = as.integer(dright), m = as.integer(nmax), z = as.double(numeric(nX)), PACKAGE="spatstat.linnet") ans <- z$z if(floored) ans <- pmax(ans, 1/volume(L)) return(ans) } spatstat.linnet/R/densitylppVoronoi.R0000644000176200001440000001552514144334010017502 0ustar liggesusers#' #' densitylppVoronoi.R #' #' densityVoronoi.lpp #' #' $Revision: 1.14 $ $Date: 2021/07/01 11:31:50 $ #' densityVoronoi.lpp <- function(X, f = 1, ..., nrep = 1, verbose = TRUE){ # Check input stopifnot(is.lpp(X)) check.1.real(f) if(badprobability(f)) stop("f should be a probability between 0 and 1") check.1.integer(nrep) stopifnot(nrep >= 1) #' secret argument what <- resolve.1.default(list(what="image"), list(...)) what <- match.arg(what, c("image", "function")) if(f == 0 || npoints(X) == 0) { #' uniform estimate lambdabar <- intensity(unmark(X)) fun <- function(x, y, seg, tp) { rep(lambdabar, length(seg)) } g <- linfun(fun, domain(X)) if(what == "image") g <- as.linim(g, ...) return(g) } if(f == 1) { #' Voronoi estimate if(!anyDuplicated(X)) { tes <- lineardirichlet(X) num <- 1 } else { um <- uniquemap(X) first <- (um == seq_along(um)) UX <- X[first] tes <- lineardirichlet(UX) num <- as.integer(table(factor(um, levels=um[first]))) } v <- tile.lengths(tes) g <- as.linfun(tes, values=num/v, navalue=0) if(what == "image") g <- as.linim(g, ...) return(g) } #' Smoothed Voronoi estimate. #' For each repetition calculate Dirichlet tessellation; #' save information in a list of dataframes; and save the #' corresponding intensity values (i.e. inverse tile lengths) #' in a list of vectors. dflist <- tilevalueslist <- vector("list", nrep) blankentry <- data.frame(seg = integer(0), t0 = numeric(0), t1 = numeric(0), tile = integer(0)) for (i in 1:nrep) { Xthin <- rthin(X, f) if(npoints(Xthin) == 0){ tilevalueslist[[i]] <- 0 dflist[[i]] <- blankentry } else { if(!anyDuplicated(Xthin)) { tes <- lineardirichlet(Xthin) num <- 1 } else { um <- uniquemap(Xthin) first <- (um == seq_along(um)) UXthin <- Xthin[first] tes <- lineardirichlet(UXthin) num <- as.integer(table(factor(um, levels=um[first]))) } v <- tile.lengths(tes) tilevalueslist[[i]] <- num/v dflist[[i]] <- tes$df } } #' Make the result into a function on the linear network fun <- function(x, y, seg, tp) { result <- numeric(length(seg)) for(j in 1:nrep){ dfj <- dflist[[j]] if(nrow(dfj) > 0) { #' classify query points by tessellation k <- lineartileindex(seg, tp, dfj) #' add Voronoi estimate lamj <- tilevalueslist[[j]] if(!anyNA(k)) { result <- result + lamj[k] } else { ok <- !is.na(k) result[ok] <- result[ok] + lamj[k[ok]] } } } return(result/(nrep*f)) } g <- linfun(fun, domain(X)) if(what == "image") g <- as.linim(g, ...) 
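    #' (With f = 1 this is the raw Voronoi estimate: on each Dirichlet tile
    #' the intensity equals the multiplicity of the data point generating the
    #' tile divided by the tile length, i.e. 1/(tile length) when the data
    #' points are not duplicated.)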
return(g) } bw.voronoi <- function(X, ..., probrange = c(0.2,0.8), nprob = 10, prob = NULL, nrep = 100, verbose = TRUE, warn=TRUE){ stopifnot(is.lpp(X)) trap.extra.arguments(..., .Context="in bw.voronoi") if(!is.null(prob)) { stopifnot(is.numeric(prob) && is.vector(prob)) nprob <- length(prob) } else { check.range(probrange) prob <- seq(from=probrange[1L], to=probrange[2L], length.out=nprob) } check.1.integer(nrep) nX <- npoints(X) cooX <- coords(X) segX <- cooX$seg tpX <- cooX$tp if(nX == 0) return(max(prob)) if(verbose) { cat("Performing", nrep, "replicates... ") pstate <- list() } lamhat <- array(, dim=c(nX, nprob, nrep)) for(irep in seq_len(nrep)) { if(verbose) pstate <- progressreport(irep, nrep, state=pstate) U <- runif(nX) for(j in seq_len(nprob)) { pj <- prob[j] retain <- (U <= pj) if(any(retain)) { Xp <- X[retain] #' compute leave-one-out estimates for points in Xp lamhat[retain, j, irep] <- looVoronoiLPP(Xp)/pj #' compute leave-one-out estimates for other points if(any(extra <- !retain)) { tess <- lineardirichlet(Xp) idx <- as.integer(lineartileindex(segX[extra], tpX[extra], tess)) lamhat[extra, j, irep] <- 1/(pj * tile.lengths(tess)[idx]) } } else lamhat[,j,irep] <- 0 } } lamhat <- apply(lamhat, c(1,2), mean) cv <- colSums(log(lamhat)) result <- bw.optim(cv, prob, optimum="max", creator="bw.voronoi", criterion="Likelihood Cross-Validation", warnextreme=warn, hargnames=c("probrange", "prob"), unitname=NULL) return(result) } looVoronoiLPP <- function(X) { #' Compute leave-one-out Voronoi intensity estimate #' Hacked from 'lineardirichlet' nX <- npoints(X) if(nX == 0) return(numeric(0)) #' Unique points, remembering original sequence ii <- which(!duplicated(X)) uX <- X[ii] nuX <- npoints(uX) #' trivial case if(nuX <= 1) return(rep(1/volume(domain(X)), nX)) #' local coordinates coUX <- coords(uX)[, c("seg", "tp")] #' add label from original sequence index coUX$lab <- ii #' reorder oo <- with(coUX, order(seg, tp)) coUXord <- coUX[oo, , drop=FALSE] seg <- coUXord$seg tp <- coUXord$tp #' nearest neighbour of each data point, in sorted unique pattern nnid <- nnwhich(uX[oo]) #' for each data point Y[i] in the sorted pattern Y, #' find the label of the tile that will cover Y[i] when Y[i] is removed neighlab <- coUXord$lab[nnid] #' network data L <- domain(X) nv <- nvertices(L) ns <- nsegments(L) seglen <- lengths_psp(as.psp(L)) from <- L$from to <- L$to #' upper bound on interpoint distance huge <- sum(seglen) #' numerical tolerance for nnwhich tol <- max(sqrt(.Machine$double.eps), diameter(Frame(L))/2^20) #' For each vertex of network, find nearest and second-nearest data points a <- vnnFind(seg, tp, ns, nv, from, to, seglen, huge, tol, kmax=2) vnndist <- a$vnndist vnnwhich <- a$vnnwhich vnnlab <- coUXord$lab[vnnwhich] # index into original data pattern vnnlab <- matrix(vnnlab, ncol=2) #' compute result for each unique point lenf <- numeric(nuX) for(i in seq_len(nuX)) { #' compute Dirichlet tessellation WITHOUT point i coo.i <- coUXord[-i, , drop=FALSE] usenearest <- (vnnwhich[,1] != i) vnd <- ifelse(usenearest, vnndist[,1], vnndist[,2]) vnw <- ifelse(usenearest, vnnwhich[,1], vnnwhich[,2]) vnl <- ifelse(usenearest, vnnlab[,1], vnnlab[,2]) adjust <- (vnw > i) vnw[adjust] <- vnw[adjust] - 1L df <- ldtEngine(nv, ns, from, to, seglen, huge, coo.i, vnd, vnw, vnl) #' tile label of nearest neighbour neigh <- neighlab[i] #' find tile length associated with nearest neighbour of point i lenf[i] <- with(df, sum((tile == neigh) * seglen[seg] * (t1-t0))) } #' put back in correct place result <- 
numeric(npoints(X)) result[ii[oo]] <- 1/lenf return(result) } spatstat.linnet/R/lineardisc.R0000644000176200001440000002115314144334007016050 0ustar liggesusers# # # disc.R # # $Revision: 1.34 $ $Date: 2021/01/07 03:54:12 $ # # Compute the disc of radius r in a linear network # # lineardisc <- function(L, x=locator(1), r, plotit=TRUE, cols=c("blue", "red", "green"), add=TRUE) { # L is the linear network (object of class "linnet") # x is the centre point of the disc # r is the radius of the disc # stopifnot(inherits(L, "linnet")) check.1.real(r) if(L$sparse) { message("Converting linear network to non-sparse representation..") L <- as.linnet(L, sparse=FALSE) message("Done.") } lines <- L$lines vertices <- L$vertices lengths <- lengths_psp(lines) win <- L$window marx <- marks(lines) ## if(missing(x) || is.null(x)) x <- clickppp(1, win, add=TRUE) if(is.lpp(x) && identical(L, domain(x))) { ## extract local coordinates stopifnot(npoints(x) == 1) coo <- coords(x) startsegment <- coo$seg startfraction <- coo$tp } else { ## interpret x as 2D location x <- as.ppp(x, win) stopifnot(npoints(x) == 1) ## project x to nearest segment pro <- project2segment(x, lines) ## which segment? startsegment <- pro$mapXY ## parametric position of x along this segment startfraction <- pro$tp } ## vertices at each end of this segment A <- L$from[startsegment] B <- L$to[startsegment] # distances from x to A and B dxA <- startfraction * lengths[startsegment] dxB <- (1-startfraction) * lengths[startsegment] # is r large enough to reach both A and B? startfilled <- (max(dxA, dxB) <= r) # compute vector of shortest path distances from x to each vertex j, # going through A: dxAv <- dxA + L$dpath[A,] # going through B: dxBv <- dxB + L$dpath[B,] # going either through A or through B: dxv <- pmin.int(dxAv, dxBv) # Thus dxv[j] is the shortest path distance from x to vertex j. # # Determine which vertices are inside the disc of radius r covered <- (dxv <= r) # Thus covered[j] is TRUE if the j-th vertex is inside the disc. # # Determine which line segments are completely inside the disc # from <- L$from to <- L$to # ( a line segment is inside the disc if the shortest distance # from x to one of its endpoints, plus the length of the segment, # is less than r .... allinside <- (dxv[from] + lengths <= r) | (dxv[to] + lengths <= r) # ... 
or alternatively, if the sum of the # two residual distances exceeds the length of the segment ) residfrom <- pmax.int(0, r - dxv[from]) residto <- pmax.int(0, r - dxv[to]) allinside <- allinside | (residfrom + residto >= lengths) # start segment is special allinside[startsegment] <- startfilled # Thus allinside[k] is TRUE if the k-th segment is inside the disc # Collect all these segments disclines <- lines[allinside] # # Determine which line segments cross the boundary of the disc boundary <- (covered[from] | covered[to]) & !allinside # For each of these, calculate the remaining distance at each end resid.from <- ifelseXB(boundary, pmax.int(r - dxv[from], 0), 0) resid.to <- ifelseXB(boundary, pmax.int(r - dxv[to], 0), 0) # Where the remaining distance is nonzero, create segment and endpoint okfrom <- (resid.from > 0) okfrom[startsegment] <- FALSE if(any(okfrom)) { v0 <- vertices[from[okfrom]] v1 <- vertices[to[okfrom]] tp <- (resid.from/lengths)[okfrom] vfrom <- ppp((1-tp)*v0$x + tp*v1$x, (1-tp)*v0$y + tp*v1$y, window=win) extralinesfrom <- as.psp(from=v0, to=vfrom) if(!is.null(marx)) marks(extralinesfrom) <- marx %msub% okfrom } else vfrom <- extralinesfrom <- NULL # okto <- (resid.to > 0) okto[startsegment] <- FALSE if(any(okto)) { v0 <- vertices[to[okto]] v1 <- vertices[from[okto]] tp <- (resid.to/lengths)[okto] vto <- ppp((1-tp)*v0$x + tp*v1$x, (1-tp)*v0$y + tp*v1$y, window=win) extralinesto <- as.psp(from=v0, to=vto) if(!is.null(marx)) marks(extralinesto) <- marx %msub% okto } else vto <- extralinesto <- NULL # # deal with special case where start segment is not fully covered if(!startfilled) { vA <- vertices[A] vB <- vertices[B] rfrac <- r/lengths[startsegment] tleft <- pmax.int(startfraction-rfrac, 0) tright <- pmin.int(startfraction+rfrac, 1) vleft <- ppp((1-tleft) * vA$x + tleft * vB$x, (1-tleft) * vA$y + tleft * vB$y, window=win) vright <- ppp((1-tright) * vA$x + tright * vB$x, (1-tright) * vA$y + tright * vB$y, window=win) startline <- as.psp(from=vleft, to=vright) if(!is.null(marx)) marks(startline) <- marx %msub% startsegment startends <- superimpose(if(!covered[A]) vleft else NULL, if(!covered[B]) vright else NULL) } else startline <- startends <- NULL # # combine all lines disclines <- superimpose(disclines, extralinesfrom, extralinesto, startline, W=win, check=FALSE) # combine all disc endpoints discends <- superimpose(vfrom, vto, vertices[dxv == r], startends, W=win, check=FALSE) # if(plotit) { if(!add || dev.cur() == 1) plot(L, main="") plot(as.ppp(x), add=TRUE, cols=cols[1L], pch=16) plot(disclines, add=TRUE, col=cols[2L], lwd=2) plot(discends, add=TRUE, col=cols[3L], pch=16) } return(list(lines=disclines, endpoints=discends)) } countends <- function(L, x=locator(1), r, toler=NULL, internal=list()) { # L is the linear network (object of class "linnet") # x is the centre point of the disc # r is the radius of the disc # stopifnot(inherits(L, "linnet")) sparse <- L$sparse %orifnull% is.null(L$dpath) if(sparse) stop(paste("countends() does not support linear networks", "that are stored in sparse matrix format.", "Please convert the data using as.linnet(sparse=FALSE)"), call.=FALSE) # get x if(missing(x)) x <- clickppp(1, Window(L), add=TRUE) if(!inherits(x, "lpp")) x <- as.lpp(x, L=L) np <- npoints(x) if(length(r) != np) stop("Length of vector r does not match number of points in x") ## determine whether network is connected iscon <- internal$is.connected %orifnull% is.connected(L) if(!iscon) { #' disconnected network - split into components result <- numeric(np) lab <- 
internal$connected.labels %orifnull% connected(L, what="labels") subsets <- split(seq_len(nvertices(L)), factor(lab)) for(subi in subsets) { xi <- thinNetwork(x, retainvertices=subi) witch <- which(attr(xi, "retainpoints")) ok <- is.finite(r[witch]) witchok <- witch[ok] result[witchok] <- countends(domain(xi), xi[ok], r[witchok], toler=toler, internal=list(is.connected=TRUE)) } return(result) } lines <- L$lines vertices <- L$vertices lengths <- lengths_psp(lines) dpath <- L$dpath nv <- vertices$n ns <- lines$n # if(!spatstat.options("Ccountends")) { #' interpreted code result <- integer(np) for(i in seq_len(np)) result[i] <- npoints(lineardisc(L, x[i], r[i], plotit=FALSE)$endpoints) return(result) } # extract coordinates coo <- coords(x) #' which segment startsegment <- coo$seg # parametric position of x along this segment startfraction <- coo$tp # convert indices to C seg0 <- startsegment - 1L from0 <- L$from - 1L to0 <- L$to - 1L # determine numerical tolerance if(is.null(toler)) { toler <- default.linnet.tolerance(L) } else { check.1.real(toler) stopifnot(toler > 0) } zz <- .C(SL_Ccountends, np = as.integer(np), f = as.double(startfraction), seg = as.integer(seg0), r = as.double(r), nv = as.integer(nv), xv = as.double(vertices$x), yv = as.double(vertices$y), ns = as.integer(ns), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), lengths = as.double(lengths), toler=as.double(toler), nendpoints = as.integer(integer(np)), PACKAGE="spatstat.linnet") zz$nendpoints } default.linnet.tolerance <- function(L) { # L could be a linnet or psp if(!is.null(toler <- L$toler)) return(toler) len2 <- lengths_psp(as.psp(L), squared=TRUE) len2pos <- len2[len2 > 0] toler <- if(length(len2pos) == 0) 0 else (0.001 * sqrt(min(len2pos))) toler <- makeLinnetTolerance(toler) return(toler) } makeLinnetTolerance <- function(toler) { max(sqrt(.Machine$double.xmin), toler[is.finite(toler)], na.rm=TRUE) } spatstat.linnet/R/rcelllpp.R0000644000176200001440000000447414144334010015551 0ustar liggesusers#' rcelllpp.R #' #' Analogue of Baddeley-Silverman cell process for linear network. #' #' (plus analogue of Switzer's process) #' #' $Revision: 1.3 $ $Date: 2020/03/16 10:28:51 $ rcelllpp <- local({ rcelllpp <- function(L, lambda, rnumgen=NULL, ..., saveid=FALSE) { if(inherits(L, "lintess")) { LT <- L L <- as.linnet(LT) } else if(inherits(L, "linnet")) { #' default tessellation: each segment is a tile ns <- nsegments(L) df <- data.frame(seg=1:ns, t0=0, t1=1, tile=1:ns) LT <- lintess(L, df) } else stop("L should be a linnet or lintess") #' extract list of tiles df <- LT$df #' add required data df$len <- lengths_psp(as.psp(L))[df$seg] #' generate random points st <- by(df, df$tile, addpoints, lambda=lambda, rnumgen=rnumgen, ...) st <- Reduce(rbind, st) X <- lpp(st, L) if(saveid) attr(X, "cellid") <- marks(cut(X, LT)) return(X) } addpoints <- function(df, lambda=1, rnumgen=NULL, ...) { #' take a subset of the data frame representing one tile of the tessellation #' Add random points in this subset. piecelengths <- df$len tilelength <- sum(piecelengths) mu <- tilelength * lambda n <- if(is.null(rnumgen)) rcellnumber(1, mu=mu) else rnumgen(1, mu, ...) 
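    #' n is the random count for this tile: by default it is drawn by
    #' rcellnumber() with mean mu = lambda * (tile length), so each tile
    #' receives, on average, the same expected count as a Poisson process of
    #' intensity lambda. The n points are then placed uniformly along the
    #' pieces making up the tile.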
if(n == 0) return(data.frame(seg=integer(0), tp=numeric(0))) u <- runif(n, max=tilelength) csp <- c(0, cumsum(piecelengths)) i <- findInterval(u, csp, rightmost.closed=TRUE, all.inside=TRUE) seg <- df$seg[i] tp <- df$t0[i] + (df$t1 - df$t0)[i] * (u - csp[i])/piecelengths[i] return(data.frame(seg=seg, tp=tp)) } rcelllpp }) rSwitzerlpp <- local({ rSwitzerlpp <- function(L, lambdacut, rintens=rexp, ..., cuts=c("points", "lines")) { stopifnot(inherits(L, "linnet")) cuts <- match.arg(cuts) switch(cuts, points = { X <- rpoislpp(lambdacut, L) LT <- divide.linnet(X) }, lines = { X <- rpoisline(lambdacut, L) X <- attr(X, "lines") LT <- chop.linnet(L, X) }) Z <- rcelllpp(LT, 1, rNswitzer, rintens=rintens, ...) attr(Z, "breaks") <- X return(Z) } rNswitzer <- function(n, mu, rintens=rexp, ...) { rpois(n, mu * rintens(n, ...)) } rSwitzerlpp }) spatstat.linnet/R/auclpp.R0000644000176200001440000000231614144334007015217 0ustar liggesusers## ## auclpp.R ## ## Calculate ROC curve or area under it ## ## code for linear networks ## ## $Revision: 1.1 $ $Date: 2020/06/17 04:32:13 $ roc.lpp <- function(X, covariate, ..., high=TRUE) { nullmodel <- lppm(X) result <- rocData(covariate, nullmodel, ..., high=high) return(result) } roc.lppm <- function(X, ...) { stopifnot(is.lppm(X)) model <- X lambda <- predict(model, ...) Y <- X$X nullmodel <- lppm(Y) result <- rocModel(lambda, nullmodel, ...) return(result) } # ...................................................... auc.lpp <- function(X, covariate, ..., high=TRUE) { d <- spatialCDFframe(lppm(X), covariate, ...) U <- d$values$U EU <- mean(U) result <- if(high) EU else (1 - EU) return(result) } auc.lppm <- function(X, ...) { stopifnot(inherits(X, "lppm")) model <- X if(is.multitype(model)) { # cheat ro <- roc(model, ...) aobs <- with(ro, mean(fobs)) atheo <- with(ro, mean(ftheo)) } else { lambda <- predict(model, ...) Fl <- ecdf(lambda[]) lamX <- lambda[model$X] aobs <- mean(Fl(lamX)) atheo <- mean(lambda[] * Fl(lambda[]))/mean(lambda) } result <- c(aobs, atheo) names(result) <- c("obs", "theo") return(result) } spatstat.linnet/R/linfun.R0000644000176200001440000000765314144334007015237 0ustar liggesusers# # linfun.R # # Class of functions of location on a linear network # # $Revision: 1.15 $ $Date: 2021/08/25 08:19:10 $ # linfun <- function(f, L) { stopifnot(is.function(f)) stopifnot(inherits(L, "linnet")) fargs <- names(formals(f)) needargs <- c("x", "y", "seg", "tp") if(!all(needargs %in% fargs)) stop(paste("Function must have formal arguments", commasep(sQuote(needargs))), call.=FALSE) otherfargs <- setdiff(fargs, needargs) g <- function(...) { argh <- list(...) extra <- names(argh) %in% otherfargs if(!any(extra)) { X <- as.lpp(..., L=L) value <- do.call(f, as.list(coords(X))) } else { extrargs <- argh[extra] mainargs <- argh[!extra] X <- do.call(as.lpp, append(mainargs, list(L=L))) value <- do.call(f, append(as.list(coords(X)), extrargs)) } return(value) } class(g) <- c("linfun", class(g)) attr(g, "L") <- L attr(g, "f") <- f return(g) } print.linfun <- function(x, ...) { L <- as.linnet(x) if(!is.null(explain <- attr(x, "explain"))) { explain(x) } else { splat("Function on linear network:") print(attr(x, "f"), ...) splat("Function domain:") print(L) } invisible(NULL) } summary.linfun <- function(object, ...) { print(object, ...) } as.linim.linfun <- function(X, L=domain(X), ..., eps = NULL, dimyx = NULL, xy = NULL, delta=NULL, nd=NULL) { if(is.null(L)) L <- domain(X) #' create template typical <- X(runiflpp(1, L), ...) 
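  #' (the function is probed at a single random location so that the type of
  #'  its values is known; it must return exactly one value per query point)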
if(length(typical) != 1) stop(paste("The function must return a single value", "when applied to a single point")) Y <- as.linim(typical, L, eps=eps, dimyx=dimyx, xy=xy, delta=delta, nd=nd) # extract coordinates of sample points along network df <- attr(Y, "df") coo <- df[, c("x", "y", "mapXY", "tp")] colnames(coo)[3L] <- "seg" # evaluate function at sample points vals <- do.call(X, append(as.list(coo), list(...))) # write values in data frame df$values <- vals attr(Y, "df") <- df #' overwrite values in pixel array Y$v[] <- NA pix <- nearest.raster.point(df$xc, df$yc, Y) Y$v[cbind(pix$row, pix$col)] <- vals #' return(Y) } as.data.frame.linfun <- function(x, ...) { as.data.frame(as.linim(x, ...)) } as.linfun.linim <- function(X, ...) { trap.extra.arguments(..., .Context="as.linfun.linim") ## extract info L <- as.linnet(X) df <- attr(X, "df") ## function values and corresponding locations values <- df$values locations <- with(df, as.lpp(x=x, y=y, seg=mapXY, tp=tp, L=L)) ## Function that maps any spatial location to the nearest data location nearestloc <- nnfun(locations) ## Function that reads value at nearest data location f <- function(x, y, seg, tp) { values[nearestloc(x,y,seg,tp)] } g <- linfun(f, L) return(g) } plot.linfun <- function(x, ..., L=NULL, main) { if(missing(main)) main <- short.deparse(substitute(x)) if(is.null(L)) L <- as.linnet(x) argh <- list(...) fargnames <- get("otherfargs", envir=environment(x)) resolution <- c("eps", "dimyx", "xy", "delta", "nd") convert <- names(argh) %in% c(fargnames, resolution) Z <- do.call(as.linim, append(list(x, L=L), argh[convert])) rslt <- do.call(plot.linim, append(list(Z, main=main), argh[!convert])) return(invisible(rslt)) } as.owin.linfun <- function(W, ...) { as.owin(as.linnet(W)) } domain.linfun <- as.linnet.linfun <- function(X, ...) { attr(X, "L") } as.function.linfun <- function(x, ...) { nax <- names(attributes(x)) if(!is.null(nax)) { retain <- (nax == "srcref") attributes(x)[!retain] <- NULL } return(x) } integral.linfun <- function(f, domain=NULL, ..., delta, nd) { if(missing(delta)) delta <- NULL if(missing(nd)) nd <- NULL integral(as.linim(f, delta=delta, nd=nd), domain=domain, ...) } as.linfun <- function(X, ...) { UseMethod("as.linfun") } as.linfun.linfun <- function(X, ...) { return(X) } spatstat.linnet/R/quickndirty.R0000644000176200001440000001230214144334010016267 0ustar liggesusers#' #' quick-and-dirty KDE for points on a network #' #' Copyright (C) 2019 Adrian Baddeley, Suman Rakshit and Tilman Davies #' #' $Revision: 1.5 $ $Date: 2020/04/04 02:55:54 $ densityQuick.lpp <- function(X, sigma=NULL, ..., kernel="gaussian", at=c("pixels", "points"), what=c("estimate", "se", "var"), leaveoneout=TRUE, diggle = FALSE, edge2D = FALSE, weights=NULL, positive=FALSE) { #' kernel density estimation stopifnot(is.lpp(X)) what <- match.arg(what) if(is.function(sigma)) sigma <- sigma(X) qkdeEngine(X=X, sigma=sigma, kernel=kernel, at=at, what=what, leaveoneout=leaveoneout, diggle=diggle, edge2D=edge2D, weights=weights, positive=positive, ...) } qkdeEngine <- function(X, sigma=NULL, ..., at=c("pixels", "points"), what=c("estimate", "se", "var"), leaveoneout=TRUE, diggle = FALSE, raw=FALSE, edge2D = FALSE, edge = edge2D, weights=NULL, varcov=NULL, positive=FALSE, shortcut=TRUE, precomputed=NULL, savecomputed=FALSE) { stopifnot(is.lpp(X)) at <- match.arg(at) what <- match.arg(what) L <- domain(X) S <- as.psp(L) XX <- as.ppp(X) stuff <- resolve.2D.kernel(x=XX, sigma=sigma, varcov=varcov, ...) 
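  #' resolve.2D.kernel() standardises the bandwidth specification, returning
  #' the numeric sigma, the variance-covariance matrix (if given) and the
  #' effective cutoff distance of the smoothing kernel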
sigma <- stuff$sigma varcov <- stuff$varcov if(is.infinite(stuff$cutoff)) { #' infinite bandwidth result <- switch(at, pixels = { as.linim(flatdensityfunlpp(X, weights=weights, what=what)) }, points = { flatdensityatpointslpp(X, weights=weights, what=what) }) attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } switch(what, estimate = { if(shortcut) { PS <- precomputed$PS %orifnull% pixellate(S, ..., DivideByPixelArea=TRUE) KS <- blur(PS, sigma, normalise=edge2D, bleed=FALSE, ..., varcov=varcov) } else { KS <- density(S, sigma, ..., edge=edge2D, varcov=varcov) } if(diggle && !raw) weights <- (weights %orifnull% 1) / KS[XX] KX <- density(XX, sigma, ..., weights=weights, at=at, leaveoneout=leaveoneout, edge=edge2D, diggle=FALSE, positive=FALSE, varcov=varcov) }, se = , var= { tau <- taumat <- NULL if(is.null(varcov)) { varconst <- 1/(4 * pi * prod(ensure2vector(sigma))) tau <- sigma/sqrt(2) } else { varconst <- 1/(4 * pi * sqrt(det(varcov))) taumat <- varcov/2 } if(shortcut) { PS <- precomputed$PS %orifnull% pixellate(S, ..., DivideByPixelArea=TRUE) KS <- blur(PS, sigma, normalise=edge2D, bleed=FALSE, varcov=varcov)^2 } else { KS <- density(S, sigma, ..., edge=edge2D, varcov=varcov)^2 } if(diggle && !raw) weights <- (weights %orifnull% 1) / KS[XX] KX <- varconst * density(XX, sigma=tau, ..., weights=weights, at=at, leaveoneout=leaveoneout, edge=edge2D, diggle=FALSE, positive=FALSE, varcov=taumat) }) switch(at, points = { result <- if(diggle || raw) KX else (KX/(KS[XX])) if(positive) result <- pmax(result, .Machine$double.xmin) if(savecomputed) { #' save geometry info for re-use savedstuff <- list(PS=PS, M=solutionset(PS > 0), df=NULL) } }, pixels = { Z <- if(diggle || raw) KX else (KX/KS) M <- if(shortcut) { precomputed$M %orifnull% solutionset(PS > 0) } else as.mask.psp(S, KS) Z <- Z[M, drop=FALSE] #' build linim object, using precomputed sample points if available result <- linim(L, Z, restrict=FALSE, df=precomputed$df) if(positive) result <- eval.linim(pmax(result, .Machine$double.xmin)) if(savecomputed) { #' save geometry info for re-use dfg <- attr(result, "df") dfg <- dfg[, colnames(dfg) != "values"] savedstuff <- list(PS=PS, M=M, df=dfg) } }) if(what == "se") result <- sqrt(result) attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov if(raw) attr(result, "denominator") <- KS if(savecomputed) attr(result, "savedstuff") <- savedstuff return(result) } spatstat.linnet/R/deldirnet.R0000644000176200001440000000107614144334007015707 0ustar liggesusers#' #' deldirnet.R #' #' Interfaces to 'deldir' that produce linear networks #' #' $Revision: 1.1 $ $Date: 2020/06/14 10:34:00 $ dirichletNetwork <- function(X, ...) as.linnet(dirichletEdges(X), ...) 
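#' Illustrative sketch only (not part of the package interface): one way the
#' two constructors in this file might be used together. Assumes runifpoint()
#' from spatstat.geom is available for generating a test pattern.
demoDeldirNetworks <- function(n = 20, plotit = FALSE) {
  #' generate a random planar point pattern
  X <- runifpoint(n)
  #' derive the Delaunay network and the Dirichlet (Voronoi) network
  DelNet <- delaunayNetwork(X)
  DirNet <- dirichletNetwork(X)
  if(plotit) {
    plot(DelNet, main = "Delaunay network")
    plot(DirNet, main = "Dirichlet network")
  }
  list(delaunay = DelNet, dirichlet = DirNet)
}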
delaunayNetwork <- function(X) { stopifnot(is.ppp(X)) X <- unique(X, rule="deldir") nX <- npoints(X) if(nX == 0) return(NULL) if(nX == 1L) return(linnet(X, !diag(TRUE))) if(nX == 2L) return(linnet(X, !diag(c(TRUE,TRUE)))) dd <- safedeldir(X) if(is.null(dd)) return(NULL) joins <- as.matrix(dd$delsgs[, 5:6]) return(linnet(X, edges=joins)) } spatstat.linnet/R/linearpcf.R0000644000176200001440000001274514144334007015705 0ustar liggesusers# # linearpcf.R # # $Revision: 1.29 $ $Date: 2020/01/11 04:23:16 $ # # pair correlation function for point pattern on linear network # # linearpcf <- function(X, r=NULL, ..., correction="Ang", ratio=FALSE) { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) # extract info about pattern np <- npoints(X) lengthL <- volume(domain(X)) # compute denom <- np * (np - 1)/lengthL g <- linearpcfengine(X, r=r, ..., denom=denom, correction=correction, ratio=ratio) # extract bandwidth bw <- attr(g, "bw") correction <- attr(g, "correction") # set appropriate y axis label switch(correction, Ang = { ylab <- quote(g[L](r)) fname <- c("g", "L") }, none = { ylab <- quote(g[net](r)) fname <- c("g", "net") }) g <- rebadge.fv(g, new.ylab=ylab, new.fname=fname) # reattach bandwidth attr(g, "bw") <- bw attr(g, "correction") <- correction return(g) } linearpcfinhom <- function(X, lambda=NULL, r=NULL, ..., correction="Ang", normalise=TRUE, normpower=1, update=TRUE, leaveoneout=TRUE, ratio=FALSE) { stopifnot(inherits(X, "lpp")) loo.given <- !missing(leaveoneout) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) if(is.null(lambda)) linearpcf(X, r=r, ..., correction=correction, ratio=ratio) if(normalise) { check.1.real(normpower) stopifnot(normpower >= 1) } # extract info about pattern lengthL <- volume(domain(X)) # lambdaX <- getlambda.lpp(lambda, X, ..., update=update, leaveoneout=leaveoneout, loo.given=loo.given, lambdaname="lambda") # invlam <- 1/lambdaX invlam2 <- outer(invlam, invlam, "*") denom <- if(!normalise) lengthL else if(normpower == 1) sum(invlam) else lengthL * (sum(invlam)/lengthL)^normpower g <- linearpcfengine(X, ..., r=r, reweight=invlam2, denom=denom, correction=correction, ratio=ratio) # extract bandwidth bw <- attr(g, "bw") correction <- attr(g, "correction") # set appropriate y axis label switch(correction, Ang = { ylab <- quote(g[L, inhom](r)) fname <- c("g", "list(L, inhom)") }, none = { ylab <- quote(g[net, inhom](r)) fname <- c("g", "list(net, inhom)") }) g <- rebadge.fv(g, new.fname=fname, new.ylab=ylab) # reattach bandwidth attr(g, "bw") <- bw attr(g, "correction") <- correction attr(g, "dangerous") <- attr(lambdaX, "dangerous") return(g) } linearpcfengine <- function(X, ..., r=NULL, reweight=NULL, denom=1, correction="Ang", ratio=FALSE) { # ensure distance information is present X <- as.lpp(X, sparse=FALSE) # extract info about pattern np <- npoints(X) # extract linear network L <- domain(X) W <- Window(L) # determine r values rmaxdefault <- 0.98 * boundingradius(L) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # type <- if(correction == "Ang") "L" else "net" fname <- c("g", type) ylab <- substitute(g[type](r), list(type=type)) # if(np < 2) { # no pairs to count: return zero function zeroes <- numeric(length(r)) df <- data.frame(r = r, est = zeroes) g <- ratfv(df, NULL, 0, "r", ylab, "est", . 
~ r, c(0, rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname = fname, ratio=ratio) if(correction == "Ang") { # tack on theoretical value g <- bind.ratfv(g, quotient = data.frame(theo=r), denominator = 0, labl = makefvlabel(NULL, NULL, fname, "theo"), desc = "theoretical Poisson %s", ratio=ratio) } attr(g, "correction") <- correction return(g) } # compute pairwise distances D <- pairdist(X) #--- compile into pcf --- if(correction == "none" && is.null(reweight)) { # no weights (Okabe-Yamada) g <- compilepcf(D, r, denom=denom, fname=fname, ratio=ratio) unitname(g) <- unitname(X) attr(g, "correction") <- correction return(g) } if(correction == "none") { edgewt <- 1 } else { ## inverse m weights (Wei's correction) ## determine tolerance toler <- default.linnet.tolerance(L) ## compute m[i,j] m <- DoCountEnds(X, D, toler) edgewt <- 1/m } # compute pcf wt <- if(!is.null(reweight)) edgewt * reweight else edgewt g <- compilepcf(D, r, weights=wt, denom=denom, ..., fname=fname, ratio=ratio) # extract bandwidth bw <- attr(g, "bw") # tack on theoretical value g <- bind.ratfv(g, quotient = data.frame(theo=rep.int(1,length(r))), denominator = denom, labl = makefvlabel(NULL, NULL, fname, "pois"), desc = "theoretical Poisson %s", ratio = ratio) # tweak unitname(g) <- unitname(X) fvnames(g, ".") <- rev(fvnames(g, ".")) # tack on bandwidth again attr(g, "bw") <- bw attr(g, "correction") <- correction return(g) } spatstat.linnet/R/lintess.R0000644000176200001440000003361314144334007015420 0ustar liggesusers#' #' lintess.R #' #' Tessellations on a Linear Network #' #' $Revision: 1.43 $ $Date: 2021/01/07 03:53:35 $ #' lintess <- function(L, df, marks=NULL) { verifyclass(L, "linnet") if(missing(df) || is.null(df)) { # tessellation consisting of a single tile ns <- nsegments(L) df <- data.frame(seg=seq_len(ns), t0=0, t1=1, tile=factor(1)) return(lintess(L, df, marks)) } #' validate 'df' stopifnot(is.data.frame(df)) dfnames <- colnames(df) needed <- c("seg", "t0", "t1", "tile") if(any(bad <- is.na(match(needed, dfnames)))) stop(paste(ngettext(sum(bad), "Column", "Columns"), commasep(sQuote(needed[bad])), "missing from data frame"), call.=FALSE) #' straighten out df <- df[, needed] df$seg <- as.integer(df$seg) df$tile <- as.factor(df$tile) if(any(reversed <- with(df, t1 < t0))) df[reversed, c("t0", "t1")] <- df[reversed, c("t1", "t0")] with(df, { segU <- sortunique(seg) segN <- seq_len(nsegments(L)) if(length(omitted <- setdiff(segN, segU)) > 0) stop(paste(ngettext(length(omitted), "Segment", "Segments"), commasep(omitted), "omitted from data"), call.=FALSE) if(length(unknown <- setdiff(segU, segN)) > 0) stop(paste(ngettext(length(unknown), "Segment", "Segments"), commasep(unknown), ngettext(length(unknown), "do not", "does not"), "exist in the network"), call.=FALSE) pieces <- split(df, seg) for(piece in pieces) { t0 <- piece$t0 t1 <- piece$t1 thedata <- paste("Data for segment", piece$seg[[1L]]) if(!any(t0 == 0)) stop(paste(thedata, "do not contain an entry with t0 = 0"), call.=FALSE) if(!any(t1 == 1)) stop(paste(thedata, "do not contain an entry with t1 = 1"), call.=FALSE) if(any(t1 < 1 & is.na(match(t1, t0))) | any(t0 > 0 & is.na(match(t0, t1)))) stop(paste(thedata, "are inconsistent"), call.=FALSE) } }) #' validate marks if(!is.null(marks)) { marks <- as.data.frame(marks) nr <- nrow(marks) nt <- length(levels(df$tile)) if(nr == 1L) { marks <- marks[rep(1, nt), , drop=FALSE] row.names(marks) <- 1:nt nr <- nt } else if(nr != nt) { stop(paste("Wrong number of", 
ngettext(ncol(marks), "mark values:", "rows of mark values:"), nr, "should be", nt), call.=FALSE) } } out <- list(L=L, df=df, marks=marks) class(out) <- c("lintess", class(out)) return(out) } print.lintess <- function(x, ...) { splat("Tessellation on a linear network") nt <- length(levels(x$df$tile)) splat(nt, ngettext(nt, "tile", "tiles")) if(anyNA(x$df$tile)) splat("[An additional tile is labelled NA]") if(!is.null(marx <- x$marks)) { mvt <- markvaluetype(marx) if(length(mvt) == 1) { splat("Tessellation has", mvt, "marks") } else { splat("Tessellation has mark variables", commasep(paste(colnames(marx), paren(mvt)))) } } return(invisible(NULL)) } nobjects.lintess <- function(x) { length(levels(x$df$tile)) } tile.lengths <- function(x) { if(!inherits(x, "lintess")) stop("x should be a tessellation on a linear network (class 'lintess')", call.=FALSE) seglen <- lengths_psp(as.psp(x$L)) df <- x$df df$fraglen <- with(df, seglen[seg] * (t1-t0)) tilelen <- with(df, tapplysum(fraglen, list(tile))) return(tilelen) } tilenames.lintess <- function(x) { levels(x$df$tile) } "tilenames<-.lintess" <- function(x, value) { levels(x$df$tile) <- value return(x) } marks.lintess <- function(x, ...) { x$marks } "marks<-.lintess" <- function(x, ..., value) { if(!is.null(value)) { value <- as.data.frame(value) nt <- length(levels(x$df$tile)) if(nrow(value) != nt) stop(paste("replacement value for marks has wrong length:", nrow(value), "should be", nt), call.=FALSE) rownames(value) <- NULL if(ncol(value) == 1) colnames(value) <- "marks" } x$marks <- value return(x) } unmark.lintess <- function(X) { X$marks <- NULL return(X) } summary.lintess <- function(object, ...) { df <- object$df lev <- levels(df$tile) nt <- length(lev) nr <- nrow(df) seglen <- lengths_psp(as.psp(object$L)) df$fraglen <- with(df, seglen[seg] * (t1-t0)) tilelen <- with(df, tapplysum(fraglen, list(tile))) hasna <- anyNA(df$tile) nalen <- if(hasna) (sum(seglen) - sum(tilelen)) else 0 marx <- object$marks if(!is.null(marx)) { mvt <- markvaluetype(marx) names(mvt) <- colnames(marx) marx <- summary(marx) } else mvt <- NULL y <- list(nt=nt, nr=nr, lev=lev, seglen=seglen, tilelen=tilelen, hasna=hasna, nalen=nalen, marx=marx, mvt=mvt) class(y) <- c("summary.lintess", class(y)) return(y) } print.summary.lintess <- function(x, ...) { splat("Tessellation on a linear network") with(x, { splat(nt, "tiles") if(hasna) splat("[An additional tile is labelled NA]") if(nt <= 30) { splat("Tile labels:", paste(lev, collapse=" ")) splat("Tile lengths:") print(signif(tilelen, 4)) } else { splat("Tile lengths (summary):") print(summary(tilelen)) } if(hasna) splat("Tile labelled NA has length", nalen) if(!is.null(marx)) { splat("Tessellation is marked") if(length(mvt) == 1) { splat("Marks are of type", sQuote(mvt)) } else { splat("Mark variables:", commasep(paste(names(mvt), paren(unname(mvt))))) } splat("Summary of marks:") print(marx) } }) return(invisible(NULL)) } plot.lintess <- local({ plot.lintess <- function(x, ..., main, add=FALSE, style=c("colour", "width", "image"), col=NULL, values=marks(x), ribbon=TRUE, ribargs=list(), multiplot=TRUE, do.plot=TRUE ) { if(missing(main)) main <- short.deparse(substitute(x)) style <- match.arg(style) df <- x$df ntiles <- length(levels(df$tile)) #' Associate 'values' with tiles if(markformat(values) == "hyperframe") values <- as.data.frame(values) #' automatic warning switch(markformat(values), none = { #' no values assigned. 
#' default is tile name tn <- tilenames(x) values <- factor(tn, levels=tn) }, vector = { #' vector of values. #' validate length of vector check.anyvector(values, ntiles, things="tiles") }, dataframe = { #' data frame or matrix of values. values <- as.data.frame(values) if(nrow(values) != ntiles) stop(paste("Number of rows of values =", nrow(values), "!=", ntiles, "= number of tiles"), call.=FALSE) if(multiplot && ncol(values) > 1 && !add) { #' Multiple Panel Plot result <- multi.plot.lintess(x, ..., style=style, main=main, do.plot=do.plot, ribbon=ribbon, ribargs=ribargs, col=col) return(invisible(result)) } if(ncol(values) > 1) warning("Using only the first column of values") values <- values[,1] }, stop("Format of values is not understood") ) #' Single Panel Plot if(style == "image") { z <- plot(as.linfun(x, values=values), main=main, ..., add=add, do.plot=do.plot, ribbon=ribbon, ribargs=ribargs, col=col) return(invisible(z)) } #' convert to marked psp object L <- as.linnet(x) from <- L$from[df$seg] to <- L$to[df$seg] V <- vertices(L) vx <- V$x vy <- V$y segdata <- with(df, list(x0=vx[from] * (1-t0) + vx[to] * t0, y0=vy[from] * (1-t0) + vy[to] * t0, x1=vx[from] * (1-t1) + vx[to] * t1, y1=vy[from] * (1-t1) + vy[to] * t1, marks=values[as.integer(tile)])) S <- as.psp(segdata, window=Window(L)) cmap <- plot(S, style=style, add=add, do.plot=do.plot, main=main, ribbon=ribbon, ribargs=ribargs, col=col, ...) return(invisible(cmap)) } multi.plot.lintess <- function(x, ..., zlim=NULL, col=NULL, equal.ribbon=FALSE) { if(equal.ribbon && is.null(zlim) && !inherits(col, "colourmap")) zlim <- range(marks(x)) if(!is.null(zlim)) { result <- plot(unstack(x), ..., zlim=zlim, col=col) } else { result <- plot(unstack(x), ..., col=col) } return(invisible(result)) } plot.lintess }) unstack.lintess <- function(x, ...) { marx <- marks(x) if(is.null(marx) || is.null(dim(marx)) || ncol(marx) <= 1) return(solist(x)) ux <- unmark(x) y <- solapply(as.list(marx), setmarks, x=ux) return(y) } as.owin.lintess <- function(W, ...) { as.owin(as.linnet(W), ...) } Window.lintess <- function(X, ...) { as.owin(as.linnet(X)) } domain.lintess <- as.linnet.lintess <- function(X, ...) { X$L } as.data.frame.lintess <- function(x, ...) { df <- x$df if(!is.null(marx <- marks(x))) { marx <- as.data.frame(marx) if(ncol(marx) == 1L) colnames(marx) <- "marks" marx <- marx[as.integer(df$tile), , drop=FALSE] df <- cbind(df, marx) } df <- as.data.frame(df, ...) 
return(df) } lineartileindex <- function(seg, tp, Z, method=c("encode", "C", "interpreted")) { method <- match.arg(method) df <- if(inherits(Z, "lintess")) Z$df else if(is.data.frame(Z)) Z else stop("Format of Z is unrecognised") switch(method, interpreted = { n <- length(seg) #' extract tessellation data tilenames <- levels(df$tile) answer <- factor(rep(NA_integer_, n), levels=seq_along(tilenames), labels=tilenames) for(i in seq_along(seg)) { tpi <- tp[i] segi <- seg[i] j <- which(df$seg == segi) kk <- which(df[j, "t0"] <= tpi & df[j, "t1"] >= tpi) answer[i] <- if(length(kk) == 0) NA else df[j[min(kk)], "tile"] } }, encode = { #' encode locations as numeric loc <- seg - 1 + tp #' extract tessellation data and sort them df <- df[order(df$seg, df$t0), , drop=FALSE] m <- nrow(df) #' encode breakpoints as numeric bks <- with(df, c(seg - 1 + t0, seg[m])) #' which piece contains each query point jj <- findInterval(loc, bks, left.open=TRUE, all.inside=TRUE, rightmost.closed=TRUE) answer <- df$tile[jj] }, C = { #' sort query data oo <- order(seg, tp) seg <- seg[oo] tp <- tp[oo] n <- length(seg) #' extract tessellation data and sort them df <- df[order(df$seg, df$t0), , drop=FALSE] m <- nrow(df) #' handle factor dftile <- df$tile tilecode <- as.integer(dftile) tilenames <- levels(dftile) #' launch z <- .C(SL_lintileindex, n = as.integer(n), seg = as.integer(seg), tp = as.double(tp), dfn = as.integer(m), dfseg = as.integer(df$seg), dft0 = as.double(df$t0), dft1 = as.double(df$t1), dftile = as.integer(tilecode), answer = as.integer(integer(n)), PACKAGE="spatstat.linnet") z <- z$answer z[z == 0] <- NA answer <- integer(n) answer[oo] <- z answer <- factor(answer, levels=seq_along(tilenames), labels=tilenames) }) return(answer) } as.linfun.lintess <- local({ as.linfun.lintess <- function(X, ..., values=marks(X), navalue=NA) { Xdf <- X$df tilenames <- levels(Xdf$tile) value.is.tile <- is.null(values) if(value.is.tile) { tilevalues <- factor(tilenames, levels=tilenames) } else { if(!is.null(dim(values))) values <- values[,1] if(length(values) != length(tilenames)) stop("Length of 'values' should equal the number of tiles", call.=FALSE) tilevalues <- values } f <- function(x, y, seg, tp) { k <- as.integer(lineartileindex(seg, tp, Xdf)) if(!anyNA(k)) { result <- tilevalues[k] } else { ok <- !is.na(k) result <- rep(navalue, length(seg)) result[ok] <- tilevalues[k[ok]] } return(result) } g <- linfun(f, X$L) attr(g, "explain") <- uitleggen return(g) } uitleggen <- function(x, ...) 
{ envf <- environment(attr(x, "f")) Xdf <- get("Xdf", envir=envf) value.is.tile <- get("value.is.tile", envir=envf) %orifnull% FALSE if(value.is.tile) { valuename <- "the tile index" } else { tilevalues <- get("tilevalues", envir=envf) valuetype <- typeof(tilevalues) valuename <- paste("a value", paren(sQuote(valuetype)), "associated with each tile") } splat("Function on a network, associated with a tessellation,") splat("\treturns", valuename) nt <- length(levels(Xdf$tile)) splat("Tessellation has", nt, ngettext(nt, "tile", "tiles")) splat("Function domain:") print(as.linnet(x)) return(invisible(NULL)) } as.linfun.lintess }) spatstat.linnet/R/lintessmakers.R0000644000176200001440000001444514144334007016625 0ustar liggesusers#' #' lintessmakers.R #' #' Creation of linear tessellations #' and intersections between lintess objects #' #' $Revision: 1.4 $ $Date: 2020/11/04 02:19:04 $ #' divide.linnet <- local({ #' Divide a linear network into tiles demarcated by #' the points of a point pattern divide.linnet <- function(X) { stopifnot(is.lpp(X)) L <- as.linnet(X) coo <- coords(X) #' add identifiers of endpoints coo$from <- L$from[coo$seg] coo$to <- L$to[coo$seg] #' group data by segment, sort by increasing 'tp' coo <- coo[with(coo, order(seg, tp)), , drop=FALSE] bits <- split(coo, coo$seg) #' expand as a sequence of intervals bits <- lapply(bits, expanddata) #' reassemble as data frame df <- Reduce(rbind, bits) #' find all undivided segments other <- setdiff(seq_len(nsegments(L)), unique(coo$seg)) #' add a single line for each undivided segment if(length(other) > 0) df <- rbind(df, data.frame(seg=other, t0=0, t1=1, from=L$from[other], to=L$to[other])) #' We now have a tessellation #' Sort again df <- df[with(df, order(seg, t0)), , drop=FALSE] #' Now identify connected components #' Two intervals are connected if they share an endpoint #' that is a vertex of the network. 
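    #' Build an edge list (iedge, jedge) joining interval pieces that meet at
    #' each network vertex, then label connected components with cocoEngine();
    #' each component becomes one tile of the resulting tessellation.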
nvert <- nvertices(L) nbits <- nrow(df) iedge <- jedge <- integer(0) for(iv in seq_len(nvert)) { joined <- with(df, which(from == iv | to == iv)) njoin <- length(joined) if(njoin > 1) iedge <- c(iedge, joined[-njoin]) jedge <- c(jedge, joined[-1L]) } lab0 <- cocoEngine(nbits, iedge - 1L, jedge - 1L) lab <- lab0 + 1L lab <- as.integer(factor(lab)) df <- df[,c("seg", "t0", "t1")] df$tile <- lab return(lintess(L, df)) } expanddata <- function(z) { df <- with(z, data.frame(seg=c(seg[1L], seg), t0 = c(0, tp), t1 = c(tp, 1), from=NA_integer_, to=NA_integer_)) df$from[1L] <- z$from[1L] df$to[nrow(df)] <- z$to[1L] return(df) } divide.linnet }) intersect.lintess <- function(X, Y) { # common refinement of two tessellations on linear network verifyclass(X, "lintess") verifyclass(Y, "lintess") if(!identical(as.linnet(X), as.linnet(Y))) stop("X and Y must be defined on the same linear network") L <- as.linnet(X) ns <- nsegments(L) marX <- marks(X) marY <- marks(Y) X <- X$df Y <- Y$df XY <- data.frame(seg=integer(0), t0=numeric(0), t1=numeric(0), tile=character(0)) for(seg in seq_len(ns)) { xx <- X[X$seg == seg, , drop=FALSE] yy <- Y[Y$seg == seg, , drop=FALSE] nxx <- nrow(xx) nyy <- nrow(yy) if(nxx > 0 && nyy > 0) { for(i in 1:nxx) { xxi <- xx[i,,drop=FALSE] xr <- with(xxi, c(t0, t1)) for(j in 1:nyy) { yyj <- yy[j,,drop=FALSE] yr <- with(yyj, c(t0, t1)) zz <- intersect.ranges(xr, yr, fatal=FALSE) if(!is.null(zz)) { XY <- rbind(XY, data.frame(seg=seg, t0=zz[1], t1=zz[2], tile=paste0(xxi$tile, ":", yyj$tile))) } } } } } out <- lintess(L, XY) if(!is.null(marX) || !is.null(marY)) { ## associate marks with TILES XYtiles <- levels(out$df$tile) posstiles <- outer(levels(X$tile), levels(Y$tile), paste, sep=":") m <- match(XYtiles, as.character(posstiles)) if(anyNA(m)) stop("Internal error in matching tile labels") xid <- as.integer(row(posstiles))[m] yid <- as.integer(col(posstiles))[m] marXid <- marksubset(marX, xid) marYid <- marksubset(marY, yid) if(is.null(marX)) { marks(out) <- marYid } else if(is.null(marY)) { marks(out) <- marXid } else { if(identical(ncol(marX), 1L)) colnames(marXid) <- "marksX" if(identical(ncol(marY), 1L)) colnames(marYid) <- "marksY" marks(out) <- data.frame(marksX=marXid, marksY=marYid) } } return(out) } chop.linnet <- function(X, L) { X <- as.linnet(X) verifyclass(L, "infline") ## convert infinite lines to segments LS <- clip.infline(L, Frame(X)) linemap <- marks(LS) # line which generated each segment ## extract segments of network XS <- as.psp(X) ## find crossing points (remembering provenance) Y <- crossing.psp(LS, XS, fatal=FALSE, details=TRUE) ## initialise tessellation Tess <- lintess(X) if(is.null(Y) || npoints(Y) == 0) return(Tess) ## extract info about network V <- vertices(X) startvertex <- X$from nXS <- nsegments(XS) segseq <- seq_len(nXS) ## allocate vertices to halfplanes defined by lines Vin <- whichhalfplane(L, V) ## group crossing-points by the infinite line that made them M <- marks(Y) # column names: iA, tA, jB, tB MM <- split(M, linemap[M$iA], drop=FALSE) #' for each infinite line, #' make the tessellation induced by this line for(i in seq_along(MM)) { Mi <- MM[[i]] if(is.data.frame(Mi) && (ni <- nrow(Mi)) > 0) { #' for each segment, determine which end is in lower left halfplane startsinside <- Vin[i, startvertex ] if(anyNA(startsinside)) browser() #' find segments of X that are split, and position of split jj <- Mi$jB tt <- Mi$tB ss <- startsinside[jj] #' assemble data for these segments: 2 entries for each split segment inside <- paste0(i, ifelse(ss, "-", 
"+")) outside <- paste0(i, ifelse(ss, "+", "-")) df <- data.frame(seg=rep(jj, 2), t0=c(rep(0, ni), tt), t1=c(tt, rep(1, ni)), tile=c(inside, outside)) #' segments not split otherseg <- segseq[-jj] #' segments entirely inside otherin <- startsinside[otherseg] #' tack on if(any(otherin)) df <- rbind(df, data.frame(seg=otherseg[otherin], t0=0, t1=1, tile=paste0(i, "-"))) if(any(!otherin)) df <- rbind(df, data.frame(seg=otherseg[!otherin], t0=0, t1=1, tile=paste0(i, "+"))) #' make it a tessellation Tessi <- lintess(X, df) #' intersect with existing Tess <- intersect.lintess(Tess, Tessi) } } return(Tess) } spatstat.linnet/R/density.loo.R0000644000176200001440000001115314144334010016173 0ustar liggesusers#' #' density.loo.R #' #' Compute leave-one-out density estimates at each data point #' on a linear network #' #' Copyright (c) Greg McSwiggan and Adrian Baddeley 2017-2020 densitypointsLPP <- function(x, sigma, ..., weights=NULL, nsigma=1, leaveoneout=TRUE, fast=TRUE, fastmethod=c("onestep", "absorb"), floored=TRUE, dx=NULL, dt=NULL, iterMax=1e6, verbose=FALSE, debug=FALSE) { stopifnot(is.lpp(x)) verbose <- verbose || debug #' compute density estimates at points fastmethod <- match.arg(fastmethod) if(identical(sigma, Inf)) { if(nsigma != 1) stop("nsigma should be equal to 1 when sigma is infinite") return(flatdensityatpointslpp(x, leaveoneout=leaveoneout, weights=weights, disconnect=TRUE)) } if(!leaveoneout || fast) { #' compute density estimates at points WITHOUT omitting points f <- densityfun(x, sigma, weights=weights, nsigma=nsigma, ..., dx=dx, dt=dt, iterMax=iterMax, verbose=verbose, debug=debug) tau <- attr(f, "sigma") y <- f(x) attr(y, "sigma") <- tau if(!leaveoneout) return(y) #' fast approximation to leave-one-out estimate #' evaluate approximation to heat kernel and subtract from 'y' if(nsigma == 1) y <- matrix(y, ncol=1) lenf <- lengths_psp(as.psp(domain(x))) coo <- coords(x) seg <- coo$seg len <- lenf[seg] pos <- len * coo$tp result <- ker0 <- matrix(, npoints(x), ncol(y)) rr <- as.integer(row(ker0)) cc <- as.integer(col(ker0)) switch(fastmethod, absorb = { ker0[] <- hotrod(len[rr], pos[rr], pos[rr], tau[cc], ends="absorbing") }, onestep = { antipos <- len - pos L <- domain(x) vv <- vertexdegree(L) dleft <- vv[L$from[seg]] dright <- vv[L$to[seg]] Tau <- tau[cc] ker0[] <- pmax(0, dnorm(0, 0, Tau) + (2/dleft - 1) * dnorm(2 * pos[rr], 0, Tau) + (2/dright - 1) * dnorm(2 * antipos[rr], 0, Tau)) }) if(floored) ker0[] <- pmax(ker0[], 1/volume(domain(x))) result[] <- pmax(0, y - ker0) if(ncol(result) == 1) result <- as.numeric(result) attr(result, "sigma") <- tau return(result) } #' get setup data g <- densityfun(x, sigma, weights=weights, nsigma=nsigma, ..., dx=dx, dt=dt, iterMax=iterMax, verbose=verbose, debug=debug, exit="setup") #' extract internal data finenet <- g$linnet_obj lixelmap <- as.integer(g$lixelmap) lixelweight <- g$lixelweight Amatrix <- g$Amatrix U0 <- g$U0 deltax <- g$deltax deltat <- g$deltat niter <- g$niter nsave <- g$nsave #' if(debug) { cat("finenet:\n") print(finenet) cat("lixelmap:\n") str(lixelmap) cat("lixelweight:\n") str(lixelweight) } #' #' do the full iterative computation for each X[-i] U0 <- as.numeric(U0) v <- looHeatLPP(U0, Amatrix, npoints(x), niter, nsave, lixelweight, lixelmap, verbose=verbose) result <- if(nsave == 1) as.numeric(v) else t(v) attr(result, "sigma") <- sigma * sqrt(attr(v, "iter")/niter) attr(result, "deltat") <- deltat attr(result, "deltax") <- deltax return(result) } looHeatLPP <- function(U0, Amatrix, npts, niter, nsave, lixelweight, 
lixelmap, verbose=TRUE) { niter <- as.integer(niter) nsave <- as.integer(nsave) lixelmap <- as.integer(lixelmap) nsave <- min(nsave, niter) result <- matrix(nrow=nsave, ncol=npts) pstate <- list() if(verbose) cat(paste("Processing", npts, "points ... ")) #' save results of 'nsave' equally-spaced iterations blocksize <- niter/nsave blockends <- pmin(niter, blocksize * (1:nsave)) blockleng <- diff(c(0L, blockends)) for(i in 1:npts) { u0 <- U0 #' subtract weights corresponding to i-th data point ii <- 2 * i + c(-1, 0) ll <- lixelmap[ii] ww <- lixelweight[ii] u0[ll] <- u0[ll] - ww #' which node is closest to i-th data point knodei <- ll[which.max(abs(ww))] #' run solver U <- u0 for(jsave in 1:nsave) { nsub <- blockleng[jsave] for(l in seq_len(nsub)) U <- Amatrix %*% U result[jsave, i] <- U[knodei] } if(verbose) pstate <- progressreport(i, npts, state=pstate) } attr(result, "iter") <- blockends return(result) } spatstat.linnet/R/linearpcfmulti.R0000644000176200001440000002223314144334007016751 0ustar liggesusers# # linearpcfmulti.R # # $Revision: 1.15 $ $Date: 2020/01/11 04:36:59 $ # # pair correlation functions for multitype point pattern on linear network # # linearpcfdot <- function(X, i, r=NULL, ..., correction="Ang") { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i) || is.null(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) I <- (marx == i) J <- rep(TRUE, npoints(X)) # i.e. all points result <- linearpcfmulti(X, I, J, r=r, correction=correction, ...) correction <- attr(result, "correction") type <- if(correction == "Ang") "L" else "net" result <- rebadge.as.dotfun(result, "g", type, i) attr(result, "correction") <- correction return(result) } linearpcfcross <- function(X, i, j, r=NULL, ..., correction="Ang") { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i) || is.null(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) if(missing(j) || is.null(j)) j <- lev[2L] else if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark")) # if(i == j) { result <- linearpcf(X[marx == i], r=r, correction=correction, ...) } else { I <- (marx == i) J <- (marx == j) result <- linearpcfmulti(X, I, J, r=r, correction=correction, ...) } # rebrand correction <- attr(result, "correction") type <- if(correction == "Ang") "L" else "net" result <- rebadge.as.crossfun(result, "g", type, i, j) attr(result, "correction") <- correction return(result) } linearpcfmulti <- function(X, I, J, r=NULL, ..., correction="Ang") { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) # extract info about pattern np <- npoints(X) lengthL <- volume(domain(X)) # validate I, J if(!is.logical(I) || !is.logical(J)) stop("I and J must be logical vectors") if(length(I) != np || length(J) != np) stop(paste("The length of I and J must equal", "the number of points in the pattern")) if(!any(I)) stop("no points satisfy I") # if(!any(J)) stop("no points satisfy J") nI <- sum(I) nJ <- sum(J) nIandJ <- sum(I & J) # lambdaI <- nI/lengthL # lambdaJ <- nJ/lengthL # compute pcf denom <- (nI * nJ - nIandJ)/lengthL g <- linearPCFmultiEngine(X, I, J, r=r, denom=denom, correction=correction, ...) 
# set appropriate y axis label correction <- attr(g, "correction") type <- if(correction == "Ang") "L" else "net" g <- rebadge.as.crossfun(g, "g", type, "I", "J") attr(g, "correction") <- correction return(g) } # ................ inhomogeneous ............................ linearpcfdot.inhom <- function(X, i, lambdaI, lambdadot, r=NULL, ..., correction="Ang", normalise=TRUE) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) I <- (marx == i) J <- rep(TRUE, npoints(X)) # i.e. all points # compute result <- linearpcfmulti.inhom(X, I, J, lambdaI, lambdadot, r=r, correction=correction, normalise=normalise, ...) correction <- attr(result, "correction") type <- if(correction == "Ang") "L, inhom" else "net, inhom" result <- rebadge.as.dotfun(result, "g", type, i) attr(result, "correction") <- correction return(result) } linearpcfcross.inhom <- function(X, i, j, lambdaI, lambdaJ, r=NULL, ..., correction="Ang", normalise=TRUE) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) if(missing(j)) j <- lev[2L] else if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark")) # if(i == j) { I <- (marx == i) result <- linearpcfinhom(X[I], lambda=lambdaI, r=r, correction=correction, normalise=normalise, ...) } else { I <- (marx == i) J <- (marx == j) result <- linearpcfmulti.inhom(X, I, J, lambdaI, lambdaJ, r=r, correction=correction, normalise=normalise, ...) } # rebrand correction <- attr(result, "correction") type <- if(correction == "Ang") "L, inhom" else "net, inhom" result <- rebadge.as.crossfun(result, "g", type, i, j) attr(result, "correction") <- correction return(result) } linearpcfmulti.inhom <- function(X, I, J, lambdaI, lambdaJ, r=NULL, ..., correction="Ang", normalise=TRUE) { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) # extract info about pattern np <- npoints(X) lengthL <- volume(domain(X)) # validate I, J if(!is.logical(I) || !is.logical(J)) stop("I and J must be logical vectors") if(length(I) != np || length(J) != np) stop(paste("The length of I and J must equal", "the number of points in the pattern")) if(!any(I)) stop("no points satisfy I") # validate lambda vectors lambdaI <- getlambda.lpp(lambdaI, X, subset=I, ...) lambdaJ <- getlambda.lpp(lambdaJ, X, subset=J, ...) # compute pcf weightsIJ <- outer(1/lambdaI, 1/lambdaJ, "*") denom <- if(!normalise) lengthL else sum(1/lambdaI) g <- linearPCFmultiEngine(X, I, J, r=r, reweight=weightsIJ, denom=denom, correction=correction, ...) # set appropriate y axis label correction <- attr(g, "correction") type <- if(correction == "Ang") "L, inhom" else "net, inhom" g <- rebadge.as.crossfun(g, "g", type, "I", "J") attr(g, "correction") <- correction attr(g, "dangerous") <- union(attr(lambdaI, "dangerous"), attr(lambdaJ, "dangerous")) return(g) } # .............. internal ............................... 
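# linearPCFmultiEngine: workhorse for the multitype pair correlation
# estimators above. It computes shortest-path cross-distances between the
# points selected by I and by J, applies inverse-m (Ang) edge correction
# weights unless correction="none", and compiles the result into an 'fv'
# object using compilepcf().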
linearPCFmultiEngine <- function(X, I, J, ..., r=NULL, reweight=NULL, denom=1, correction="Ang", showworking=FALSE) { # ensure distance information is present X <- as.lpp(X, sparse=FALSE) # extract info about pattern np <- npoints(X) # extract linear network L <- domain(X) W <- Window(L) # determine r values rmaxdefault <- 0.98 * boundingradius(L) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # if(correction == "Ang") { fname <- c("g", "list(L, I, J)") ylab <- quote(g[L,I,J](r)) } else { fname <- c("g", "list(net, I, J)") ylab <- quote(g[net,I,J](r)) } # if(np < 2) { # no pairs to count: return zero function zeroes <- rep(0, length(r)) df <- data.frame(r = r, est = zeroes) g <- fv(df, "r", ylab, "est", . ~ r, c(0, rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname = fname) unitname(g) <- unitname(X) attr(g, "correction") <- correction return(g) } # ## nI <- sum(I) ## nJ <- sum(J) ## whichI <- which(I) ## whichJ <- which(J) clash <- I & J has.clash <- any(clash) ## compute pairwise distances DIJ <- crossdist(X[I], X[J], check=FALSE) if(has.clash) { ## exclude pairs of identical points from consideration Iclash <- which(clash[I]) Jclash <- which(clash[J]) DIJ[cbind(Iclash,Jclash)] <- Inf } #--- compile into pair correlation function --- if(correction == "none" && is.null(reweight)) { # no weights (Okabe-Yamada) g <- compilepcf(DIJ, r, denom=denom, check=FALSE, fname=fname) g <- rebadge.as.crossfun(g, "g", "net", "I", "J") unitname(g) <- unitname(X) attr(g, "correction") <- correction return(g) } if(correction == "none") { edgewt <- 1 } else { ## inverse m weights (Ang's correction) ## determine tolerance toler <- default.linnet.tolerance(L) ## compute m[i,j] m <- DoCountCrossEnds(X, I, J, DIJ, toler) edgewt <- 1/m } # compute pcf wt <- if(!is.null(reweight)) edgewt * reweight else edgewt g <- compilepcf(DIJ, r, weights=wt, denom=denom, check=FALSE, ..., fname=fname) ## rebadge and tweak g <- rebadge.as.crossfun(g, "g", "L", "I", "J") fname <- attr(g, "fname") # tack on theoretical value g <- bind.fv(g, data.frame(theo=rep(1,length(r))), makefvlabel(NULL, NULL, fname, "pois"), "theoretical Poisson %s") unitname(g) <- unitname(X) fvnames(g, ".") <- rev(fvnames(g, ".")) # show working if(showworking) attr(g, "working") <- list(DIJ=DIJ, wt=wt) attr(g, "correction") <- correction return(g) } spatstat.linnet/R/bw.lppl.R0000644000176200001440000001122514144334010015302 0ustar liggesusers#' #' bw.lppl.R #' #' Likelihood cross-validation for kernel smoother of point pattern on network #' #' $Revision: 1.1 $ $Date: 2020/04/08 06:41:46 $ #' bw.lppl <- function(X, ..., srange=NULL, ns=16, sigma=NULL, weights=NULL, distance=c("euclidean", "path"), shortcut=TRUE, warn=TRUE) { stopifnot(is.lpp(X)) distance <- match.arg(distance) if(!is.null(sigma)) { stopifnot(is.numeric(sigma) && is.vector(sigma)) ns <- length(sigma) } else { if(!is.null(srange)) check.range(srange) else { dd <- diameter(Frame(X)) ss <- bw.scott.iso(X) srange <- range(c(ss/10, ss*5, dd/5)) } sigma <- sqrt(seq(from=srange[1L]^2, to=srange[2L]^2, length.out=ns)) } a <- switch(distance, path = lcvlppHeat(X, sigma, ..., weights=weights, shortcut=shortcut, Nsave=max(128, 2*ns)), euclidean = lcvlppQuick(X, sigma, ..., weights=weights, shortcut=shortcut)) result <- with(a, bw.optim(cv, sigma, optimum="max", creator="bw.lppl", criterion="Likelihood Cross-Validation", warnextreme=warn, hargnames="srange", unitname=unitname(X))) return(result) } 
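#' Illustrative usage sketch only (not part of the package interface).
#' Assumes X is a point pattern on a linear network, for example
#' X <- runiflpp(50, simplenet), where 'simplenet' is the example network
#' distributed in spatstat.data.
demoBwLppl <- function(X, ns = 8) {
  #' choose a bandwidth by likelihood cross-validation (Euclidean smoothing)
  sig <- bw.lppl(X, ns = ns, distance = "euclidean")
  #' use the selected bandwidth in a quick kernel estimate of intensity
  lam <- densityQuick.lpp(X, sigma = as.numeric(sig))
  list(sigma = sig, intensity = lam)
}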
lcvlppQuick <- function(X, sigmas, ..., weights=NULL, shortcut=TRUE) { ns <- length(sigmas) cv <- numeric(ns) if(shortcut) { #' omit calculation of integral term #' precompute the geometry data lam1 <- densityQuick.lpp(X, sigma=sigmas[1], weights=weights, ..., savecomputed=TRUE) precooked <- attr(lam1, "savedstuff") for(i in 1:ns) { si <- sigmas[i] lamx <- densityQuick.lpp(X, sigma=si, at="points", leaveoneout=TRUE, weights=weights, precomputed=precooked, ...) lamx <- pmax(0, lamx) cv[i] <- sum(log(lamx)) } } else { #' full calculation precooked <- NULL cooking <- TRUE for(i in 1:ns) { si <- sigmas[i] lamx <- densityQuick.lpp(X, sigma=si, at="points", leaveoneout=TRUE, weights=weights, precomputed=precooked, ...) lam <- densityQuick.lpp(X, sigma=si, weights=weights, precomputed=precooked, savecomputed=cooking, ...) if(cooking) { #' save geometry info for re-use in subsequent iterations precooked <- attr(lam, "savedstuff") cooking <- FALSE } lamx <- pmax(0, lamx) cv[i] <- sum(log(lamx)) - integral(lam) } } return(list(cv=cv, sigma=sigmas)) } lcvlppHeat <- function(X, sigmas, ..., weights=NULL, shortcut=TRUE, finespacing=FALSE, verbose=FALSE, Nsave=128, dt=NULL,dx=NULL,iterMax=1e6) { #' first determine a resolution that is independent of sigmas #' for consistency between different calls. smax <- mean(sidelengths(Frame(X)))/4 p <- resolve.heat.steps(smax, L=domain(X), dx=dx, dt=dt, iterMax=iterMax, verbose=verbose) #' density at data points lamX <- densitypointsLPP(X, sigma=max(sigmas), ..., dt=p$dt, dx=p$dx, finespacing=finespacing, leaveoneout=TRUE, weights=weights, nsigma=Nsave, verbose=verbose) #' map desired sigmas to actual calculated sigmas actualsigmas <- attr(lamX, "sigma") kleft <- findInterval(sigmas, actualsigmas, all.inside=TRUE, rightmost.closed=TRUE) kright <- pmin(kleft+1L, Nsave) useleft <- (sigmas - actualsigmas[kleft]) < (actualsigmas[kright] - sigmas) kmap <- ifelse(useleft, kleft, kright) #' extract values for desired sigmas lamX <- lamX[ , kmap, drop=FALSE] lamX[] <- pmax(0, lamX) #' compute cross-validation term cv <- colSums(log(lamX)) #' if(shortcut) { cv <- cv - npoints(X) } else { #' add integral term L <- as.linnet(X) lam <- densityfun(X, sigma=max(sigmas), ..., finespacing=finespacing, weights=weights, nsigma=Nsave, verbose=verbose) for(j in seq_along(cv)) { kj <- kmap[j] lamjfun <- function(x, y, seg, tp) { lam(x, y, seg, tp, k=kj) } lamjim <- as.linim(linfun(lamjfun, L)) cv[j] <- cv[j] - integral(lamjim) } } return(list(cv=cv, sigma=actualsigmas[kmap])) } spatstat.linnet/R/randomlpp.R0000644000176200001440000001071314144334007015727 0ustar liggesusers# # random.R # # Random point pattern generators for a linear network # # $Revision: 1.17 $ $Date: 2021/10/01 06:12:25 $ # rpoislpp <- function(lambda, L, ..., nsim=1, drop=TRUE) { if(missing(L) || is.null(L)) { if(inherits(lambda, c("linim", "linfun"))) { L <- as.linnet(lambda) } else if(all(sapply(lambda, inherits, what=c("linim", "linfun")))) { L <- unique(lapply(lambda, as.linnet)) if(length(L) > 1) stop("All entries of lambda must be defined on the same network") L <- L[[1L]] } else stop("L is missing", call.=FALSE) } else verifyclass(L, "linnet") result <- vector(mode="list", length=nsim) S <- as.psp(L) bugout <- (nsim == 1) && drop for(i in seq_len(nsim)) { X <- datagen.rpoisppOnLines(lambda, S, ...) 
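    # datagen.rpoisppOnLines() generates the Poisson points on the segment
    # pattern S; wrap them as an 'lpp' object on the network L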
Y <- lpp(X, L) if(bugout) return(Y) result[[i]] <- Y } result <- simulationresult(result, nsim, drop) return(result) } runiflpp <- function(n, L, nsim=1, drop=TRUE) { verifyclass(L, "linnet") result <- vector(mode="list", length=nsim) S <- as.psp(L) bugout <- (nsim == 1) && drop for(i in seq_len(nsim)) { X <- datagen.runifpointOnLines(n, S) Y <- lpp(X, L) if(bugout) return(Y) result[[i]] <- Y } result <- simulationresult(result, nsim, drop) return(result) } rlpp <- function(n, f, ..., nsim=1, drop=TRUE) { if(inherits(f, "linfun")) f <- as.linim(f, ...) ismulti <- FALSE if(length(n) > 1 && inherits(f, "linim")) { f <- rep(list(f), length(n)) ismulti <- TRUE } else if(!inherits(f, "linim") && is.list(f) && all(sapply(f, inherits, what=c("linim", "linfun")))) { #' f is a list of densities for each type of point if(length(n) == 1) { n <- rep(n, length(f)) } else stopifnot(length(n) == length(f)) ismulti <- TRUE } if(ismulti) { Y <- mapply(rlpp, n=as.list(n), f=f, MoreArgs=list(nsim=nsim, drop=FALSE, ...), SIMPLIFY=FALSE) names(Y) <- names(f) %orifnull% as.character(seq(along=f)) Z <- do.call(mapply, c(list(superimpose), Y, list(SIMPLIFY=FALSE))) result <- simulationresult(Z, nsim, drop) return(result) } if(!inherits(f, "linim")) stop("f should be a linfun or linim object") if(length(n) > 1) { flist <- rep(list(f), length(n)) return(rlpp(n, flist, nsim=nsim, drop=drop, ...)) } check.1.integer(nsim) if(nsim <= 0) return(list()) #' extract data L <- as.linnet(f) df <- attr(f, "df") seglen <- lengths_psp(as.psp(L)) #' sort into segments, left-to-right within segments df <- df[order(df$mapXY, df$tp), , drop=FALSE] nr <- nrow(df) fvals <- df$values if(anyNA(fvals)) stop("f has some NA values") if(min(fvals) < 0) stop("f has some negative values") #' find interval corresponding to each sample point sameseg <- (diff(df$mapXY) == 0) sharenext <- c(sameseg, FALSE) shareprevious <- c(FALSE, sameseg) tcur <- df$tp tnext <- c(tcur[-1], NA) tprev <- c(NA, tcur[-nr]) tleft <- ifelse(shareprevious, (tcur + tprev)/2, 0) tright <- ifelse(sharenext, (tcur + tnext)/2, 1) #' compute probability of each interval probs <- fvals * (tright - tleft) * seglen[df$mapXY] probs <- probs/sum(probs) #' result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { #' sample intervals and place point uniformly in each interval ii <- sample.int(nr, size=n, replace=TRUE, prob=probs) seg <- df[ii, "mapXY"] tp <- runif(n, tleft[ii], tright[ii]) result[[isim]] <- as.lpp(seg=seg, tp=tp, L=L) } result <- simulationresult(result, nsim, drop) return(result) } rjitterlpp <- function(X, ...) { .Deprecated("rjitter.lpp", package="spatstat.linnet") rjitter.lpp(X, ...) 
} rjitter.lpp <- function(X, radius, ..., nsim=1, drop=TRUE) { verifyclass(X, "lpp") check.1.integer(nsim) stopifnot(nsim >= 1) nX <- npoints(X) if (nX == 0) { result <- rep(list(X), nsim) result <- simulationresult(result, nsim, drop) return(result) } L <- domain(X) mX <- marks(X) cooX <- coords(X) tX <- cooX$tp segX <- cooX$seg ## len <- lengths_psp(as.psp(L)) lenS <- len[segX] relrad <- ifelse(lenS > 0, radius/lenS, 0) ## lo <- pmax(tX-relrad, 0) hi <- pmin(tX+relrad, 1) ra <- hi - lo ## result <- vector(mode="list", length=nsim) for(isim in 1:nsim) { tnew <- lo + ra * runif(nX) tnew <- pmax(0, pmin(1, tnew)) result[[isim]] <- as.lpp(seg=segX, tp=tnew, L=L, marks=mX) } result <- simulationresult(result, nsim, drop) return(result) } spatstat.linnet/R/cdftestlpp.R0000644000176200001440000000436314144334007016107 0ustar liggesusers#' #' cdftestlpp.R #' #' methods for linear networks #' #' $Revision: 1.2 $ $Date: 2020/12/19 05:25:06 $ cdf.test.lpp <- function(X, covariate, test=c("ks", "cvm", "ad"), ..., interpolate=TRUE, jitter=TRUE) { Xname <- short.deparse(substitute(X)) covname <- singlestring(short.deparse(substitute(covariate))) test <- match.arg(test) if(is.character(covariate)) covname <- covariate if(!is.marked(X, dfok=TRUE)) { # unmarked model <- lppm(X) modelname <- "CSR" } else if(is.multitype(X)) { # multitype mf <- table(marks(X)) if(all(mf > 0)) { model <- lppm(X ~ marks) modelname <- "CSRI" } else { warning("Ignoring marks, because some mark values have zero frequency") X <- unmark(X) model <- lppm(X) modelname <- "CSR" } } else { # marked - general case X <- unmark(X) warning("marks ignored") model <- lppm(X) modelname <- "CSR" } dont.complain.about(model) do.call(spatialCDFtest, resolve.defaults(list(quote(model), quote(covariate), test=test), list(interpolate=interpolate, jitter=jitter), list(...), list(modelname=modelname, covname=covname, dataname=Xname))) } cdf.test.lppm <- function(model, covariate, test=c("ks", "cvm", "ad"), ..., interpolate=TRUE, jitter=TRUE, nsim=99, verbose=TRUE) { modelname <- short.deparse(substitute(model)) covname <- singlestring(short.deparse(substitute(covariate))) test <- match.arg(test) verifyclass(model, "lppm") if(is.character(covariate)) covname <- covariate if(is.poisson(model) && is.stationary(model)) modelname <- "CSR" do.call(spatialCDFtest, resolve.defaults(list(quote(model), quote(covariate), test=test), list(interpolate=interpolate, jitter=jitter, nsim=nsim, verbose=verbose), list(...), list(modelname=modelname, covname=covname))) } spatstat.linnet/R/pairdistlpp.R0000644000176200001440000001257314144334007016274 0ustar liggesusers# # pairdistlpp.R # # $Revision: 1.21 $ $Date: 2021/01/07 03:53:07 $ # # # pairdist.lpp # Calculates the shortest-path distance between each pair of points # in a point pattern on a linear network. 
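# Three methods are supported: "C" (the default, compiled code),
# "interpreted" (a pure R implementation) and "testsymm" (a check that the
# sparse C algorithm returns a symmetric matrix). Networks stored in sparse
# matrix format are always handled by the sparse C code.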
# pairdist.lpp <- function(X, ..., method="C") { stopifnot(inherits(X, "lpp")) method <- match.arg(method, c("C", "interpreted", "testsymm")) # n <- npoints(X) pairdistmat <- matrix(Inf,n,n) diag(pairdistmat) <- 0 if(n <= 1) return(pairdistmat) # L <- as.linnet(X) # if(!is.connected(L)) { #' disconnected network lab <- connected(L, what="labels") subsets <- split(seq_len(nvertices(L)), lab) for(i in seq_along(subsets)) { Xi <- thinNetwork(X, retainvertices=subsets[[i]]) witch <- attr(Xi, "retainpoints") pairdistmat[witch, witch] <- pairdist.lpp(Xi, method=method) } return(pairdistmat) } # ## ----------- network is connected ------------------------ ## Extract network data Lvert <- L$vertices nvert <- npoints(Lvert) from <- L$from to <- L$to dpath <- L$dpath nseg <- length(from) sparse <- L$sparse || is.null(dpath) ## Point coordinates Y <- as.ppp(X) cooX <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE) Xseg <- cooX$seg ## if(sparse || method == "testsymm") { ## new C code for sparse representation tX <- cooX$tp ## convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L Xseg0 <- Xseg - 1L ## sort points by increasing segment index ordX <- order(Xseg0, tX) Xseg0 <- Xseg0[ordX] tX <- tX[ordX] ## network info seglen <- lengths_psp(L$lines) huge <- 2 * sum(seglen) tol <- L$toler %orifnull% default.linnet.tolerance(L) ## if(method == "testsymm") { ## test whether the sparse algorithm yields symmetric results zz <- .C(SL_linSpairUdist, np = as.integer(n), sp = as.integer(Xseg0), tp = as.double(tX), nv = as.integer(nvert), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglen), huge = as.double(huge), tol = as.double(tol), dist = as.double(numeric(n * n)), PACKAGE="spatstat.linnet") } else { ## use algorithm which exploits symmetry zz <- .C(SL_linSpairdist, np = as.integer(n), sp = as.integer(Xseg0), tp = as.double(tX), nv = as.integer(nvert), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglen), huge = as.double(huge), tol = as.double(tol), dist = as.double(numeric(n * n)), PACKAGE="spatstat.linnet") } pairdistmat[ordX, ordX] <- zz$dist } else { switch(method, interpreted = { ## loop through all pairs of data points for (i in 1:(n-1)) { Xsegi <- Xseg[i] Xi <- Y[i] nbi1 <- from[Xsegi] nbi2 <- to[Xsegi] vi1 <- Lvert[nbi1] vi2 <- Lvert[nbi2] dXi1 <- crossdist(Xi, vi1) dXi2 <- crossdist(Xi, vi2) for (j in (i+1):n) { Xj <- Y[j] Xsegj <- Xseg[j] if(Xsegi == Xsegj) { ## points i and j lie on the same segment ## use Euclidean distance d <- crossdist(Xi, Xj) } else { ## shortest path from i to j passes through ends of segments nbj1 <- from[Xsegj] nbj2 <- to[Xsegj] vj1 <- Lvert[nbj1] vj2 <- Lvert[nbj2] ## Calculate shortest of 4 possible paths from i to j d1Xj <- crossdist(vj1,Xj) d2Xj <- crossdist(vj2,Xj) d11 <- dXi1 + dpath[nbi1,nbj1] + d1Xj d12 <- dXi1 + dpath[nbi1,nbj2] + d2Xj d21 <- dXi2 + dpath[nbi2,nbj1] + d1Xj d22 <- dXi2 + dpath[nbi2,nbj2] + d2Xj d <- min(d11,d12,d21,d22) } ## store result pairdistmat[i,j] <- pairdistmat[j,i] <- d } } }, C = { ## C code using non-sparse representation ## convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L segmap <- Xseg - 1L zz <- .C(SL_linpairdist, np = as.integer(n), xp = as.double(Y$x), yp = as.double(Y$y), nv = as.integer(nvert), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.double(L$n), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), segmap = as.integer(segmap), answer = as.double(numeric(n*n)), 
PACKAGE="spatstat.linnet") pairdistmat <- matrix(zz$answer, n, n) }) } return(pairdistmat) } spatstat.linnet/R/simulatelppm.R0000644000176200001440000000157214144334010016444 0ustar liggesusers## ## simulatelppm.R ## ## Simulation of lppm objects ## ## $Revision: 1.7 $ $Date: 2018/05/12 16:14:05 $ ## simulate.lppm <- function(object, nsim=1, ..., new.coef=NULL, progress=(nsim > 1), drop=FALSE) { starttime <- proc.time() if(!is.poisson(object$fit)) stop("Simulation of non-Poisson models is not yet implemented") lambda <- predict(object, ..., new.coef=new.coef) lmax <- if(is.im(lambda)) max(lambda) else unlist(lapply(lambda, max)) L <- as.linnet(object) result <- vector(mode="list", length=nsim) pstate <- list() for(i in seq_len(nsim)) { if(progress) pstate <- progressreport(i, nsim, state=pstate) result[[i]] <- rpoislpp(lambda, L, lmax=lmax) } result <- simulationresult(result, nsim, drop) result <- timed(result, starttime=starttime) return(result) } spatstat.linnet/R/perspex.R0000644000176200001440000001767514141460471015441 0ustar liggesusers#' perspex.R #' #' Perspective plots for linim and linfun objects #' #' Copyright (C) Adrian Baddeley 2015 #' GPL Public Licence >= 2.0 persp.linfun <- function(x, ..., main, eps=NULL, dimyx=NULL, xy=NULL) { if(missing(main)) main <- short.deparse(substitute(x)) #' convert linfun to linim y <- as.linim(x, L=as.linnet(x), eps=eps, dimyx=dimyx, xy=xy) persp(y, ..., main=main) } persp.linim <- local({ persp.linim <- function(x, ..., main, grid=TRUE, ngrid=10, col.grid="grey", col.base="white", neg.args=list(), warncross=FALSE) { xname <- short.deparse(substitute(x)) if(missing(main)) main <- xname dotargs <- list(...) #' L <- as.linnet(x) R <- Frame(L) zlim <- range(x, 0) #' set up perspective transformation and plot horizontal plane Z <- as.im(0, W=R, dimyx=ngrid) col.grid.used <- if(grid && (zlim[1] >= 0)) col.grid else NA argh <- resolve.defaults(list(x=Z, main=main, border=col.grid.used, col=col.base), dotargs, list(axes=FALSE, box=FALSE, zlim=zlim, zlab=xname, scale=TRUE, expand=0.1)) M <- do.call.matched(persp.im, argh, funargs=graphicsPars("persp")) #' compute the projection of the linear network S <- as.psp(L) E <- S$ends x0y0 <- trans3dz(E[,1], E[,2], rep(0, nrow(E)), M) x1y1 <- trans3dz(E[,3], E[,4], rep(0, nrow(E)), M) #' order the segments by their depth in the picture (back to front) segmentsequence <- NULL ## new algorithm po <- depthrelations(x0y0$x, x0y0$z, x1y1$x, x1y1$z, verbose=warncross) if(!is.null(po)) segmentsequence <- partial2total(po, nrow(E)) ## backup if(is.null(segmentsequence)) { ## old algorithm depf <- pmin(x0y0$z, x1y1$z) segmentsequence <- order(depf) } #' extract function data df <- attr(x, "df") nseg <- nobjects(S) #' handle negative values separately if a grid is shown if(grid && zlim[1] < 0) { dfneg <- df dfneg$values <- pmin(0, df$values) df$values <- pmax(0, df$values) #' plot negative part neg.args <- resolve.defaults(neg.args, dotargs) spectiveWalls(dfneg, segmentsequence, E, M, neg.args) #' plot baseline grid on top again spectiveGrid(R, ngrid, M, col=col.grid) } #' plot network do.call.matched(segments, list(x0=x0y0$x, y0=x0y0$y, x1=x1y1$x, y1=x1y1$y, ...)) #' plot function above grid (or entire function if no grid) spectiveWalls(df, segmentsequence, E, M, dotargs) #' invisible(M) } trans3dz <- function(x,y,z,pmat) { tr <- cbind(x, y, z, 1) %*% pmat list(x = tr[, 1]/tr[, 4], y = tr[, 2]/tr[, 4], z = tr[, 3]/tr[, 4]) } spectiveWalls <- function(df, segmentsequence, E, M, pargs) { #' split by segment for(i in 
segmentsequence) { dfi <- df[df$mapXY == i, , drop=FALSE] if((nn <- nrow(dfi)) > 0) { #' order by position along segment ord <- order(dfi$tp) dfi <- dfi[ord, , drop=FALSE] #' extrapolate to segment endpoints Ei <- E[i,, drop=FALSE] xx <- c(Ei$x0, dfi$x, Ei$x1) yy <- c(Ei$y0, dfi$y, Ei$y1) zz <- with(dfi, c(values[1], values, values[nn])) #' make polygon in 3D xx <- c(xx, rev(xx)) yy <- c(yy, rev(yy)) zz <- c(zz, rep(0, nn+2)) #' project polygon xy <- trans3d(xx, yy, zz, M) do.call.matched(polygon, resolve.defaults(list(x=xy$x, y=xy$y), pargs, list(col="grey"))) } } invisible(NULL) } spectiveGrid <- function(B, ngrid, M, ...) { B <- Frame(B) xr <- B$xrange yr <- B$yrange xx <- seq(xr[1], xr[2], length.out=ngrid+1) yy <- seq(yr[1], yr[2], length.out=ngrid+1) spectiveSegments(xr[1], yy, xr[2], yy, ..., M=M) spectiveSegments(xx, yr[1], xx, yr[2], ..., M=M) invisible(NULL) } spectiveSegments <- function(x0, y0, x1, y1, ..., M) { a0 <- trans3dz(x0, y0, 0, M) a1 <- trans3dz(x1, y1, 0, M) do.call.matched(segments, list(x0=a0$x, y0=a0$y, x1=a1$x, y1=a1$y, ...)) invisible(NULL) } depthrelations <- function(x0, z0, x1, z1, verbose=TRUE, useC=TRUE) { ## Find which segments lie in front of/behind other segments ## x is the projected x-coordinate ## z is the negative depth (z increases as we move closer to the viewer) front <- back <- integer(0) n <- length(x0) if(n >= 2) { ## enforce x0 <= x1 if(any(swap <- (x0 > x1))) { tmp <- x1[swap] x1[swap] <- x0[swap] x0[swap] <- tmp tmp <- z1[swap] z1[swap] <- z0[swap] z0[swap] <- tmp } if(useC) { stuff <- .Call(SL_depthrel, x0, z0, x1, z1, Verb=verbose, PACKAGE="spatstat.linnet") front <- stuff[[1]] back <- stuff[[2]] status <- stuff[[3]] if(status != 0) return(NULL) return(data.frame(front=front, back=back)) } for(i in 2:n) { for(j in 1:(i-1)) { if(x1[i] > x0[j] && x1[j] > x0[i]) { ## overlap occurs ## consider left side z0i <- z0[i] z0j <- z0[j] if(x0[j] < x0[i]) { xleft <- x0[i] tval <- (xleft - x0[j])/(x1[j]-x0[j]) if(!is.finite(tval)) tval <- 0 z0j <- z0j + tval * (z1[j] - z0j) } else { xleft <- x0[j] tval <- (xleft - x0[i])/(x1[i]-x0[i]) if(!is.finite(tval)) tval <- 0 z0i <- z0i + tval * (z1[i] - z0i) } ## consider right side z1i <- z1[i] z1j <- z1[j] if(x1[j] > x1[i]) { xright <- x1[i] tval <- (xright - xleft)/(x1[j]-xleft) if(!is.finite(tval)) tval <- 0 z1j <- z0j + tval * (z1j - z0j) } else { xright <- x1[j] tval <- (xright - xleft)/(x1[i]-xleft) if(!is.finite(tval)) tval <- 0 z1i <- z0i + tval * (z1i - z0i) } ## now determine which is in front if(z0i >= z0j && z1i >= z1j) { ## 'i' is in front front <- c(front, i) back <- c(back, j) } else if(z0i <= z0j && z1i <= z1j){ ## 'j' is in front front <- c(front, j) back <- c(back, i) } else { if(verbose) { warning(paste("segments", i, "and", j, "cross:", "z0i=", z0i, "z0j=", z0j, "z1i=", z1i, "z1j=", z1j)) } return(NULL) } } } } } return(data.frame(front=front, back=back)) } partial2total <- function(po, n=max(po), verbose=TRUE) { ## 'po' represents a partial order amongst 'n' items. ## Find an ordering of the 'n' items that respects the partial order. 
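  ## A small worked case (hypothetical input): with n = 3 and
  ## po = data.frame(front = c(2, 3), back = c(1, 1)), item 1 lies behind
  ## items 2 and 3; the first pass of the loop below emits 1, the second pass
  ## emits 2 and 3, giving the back-to-front sequence c(1, 2, 3).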
front <- po$front back <- po$back unresolved <- rep(TRUE, n) sequence <- integer(0) front <- factor(front, levels=1:n) while(any(unresolved)) { bad <- sapply(split(unresolved[back], front), any) found <- unresolved & !bad if(!any(found)) { if(verbose) warning("Could not resolve some entries in the partial order") return(NULL) } sequence <- c(sequence, which(found)) unresolved[found] <- FALSE } return(sequence) } persp.linim }) spatstat.linnet/R/distfunlpp.R0000644000176200001440000000173314144334010016117 0ustar liggesusers# # distfunlpp.R # # method for 'distfun' for class 'lpp' # # $Revision: 1.3 $ $Date: 2018/04/23 04:52:17 $ # distfun.lpp <- local({ distfun.lpp <- function(X, ..., k=1) { stopifnot(inherits(X, "lpp")) force(X) force(k) stopifnot(length(k) == 1) L <- as.linnet(X) f <- function(x, y=NULL, seg=NULL, tp=NULL, ...) { # L is part of the environment Y <- as.lpp(x=x, y=y, seg=seg, tp=tp, L=L) d <- nncross.lpp(Y, X, what="dist", k=k) return(d) } f <- linfun(f, L) assign("k", k, envir=environment(f)) assign("X", X, envir=environment(f)) attr(f, "explain") <- uitleggen attr(f, "extrargs") <- list(k=k) return(f) } uitleggen <- function(x, ...) { splat("Distance function for lpp object") envx <- environment(x) k <- get("k", envir=envx) if(k != 1L) splat("Yields distance to", ordinal(k), "nearest point") X <- get("X", envir=envx) print(X) } distfun.lpp }) spatstat.linnet/R/subsetlpp.R0000644000176200001440000000016514144334010015746 0ustar liggesusers#' #' spatstat.linnet/R/subsetlpp.R #' #' $Revision: 1.1 $ $Date: 2020/06/16 03:19:14 $ #' subset.lpp <- subset.ppx spatstat.linnet/R/linearmrkcon.R0000644000176200001440000000307614144334007016423 0ustar liggesusers# # linearmrkcon.R # # mark connection function & mark equality function for linear networks # # $Revision: 1.4 $ $Date: 2017/02/07 08:12:05 $ # linearmarkconnect <- function(X, i, j, r=NULL, ...) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i) || is.null(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) if(missing(j) || is.null(j)) j <- lev[2L] else if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark")) # ensure distance information is present X <- as.lpp(X, sparse=FALSE) # pcfij <- linearpcfcross(X, i, j, r=r, ...) pcfall <- linearpcf(X, r=r, ...) qi <- mean(marx == i) qj <- mean(marx == j) result <- eval.fv(qi * qj * pcfij/pcfall) # rebrand result <- rebadge.as.crossfun(result, "p", "L", i, j) attr(result, "labl") <- attr(pcfij, "labl") return(result) } linearmarkequal <- local({ linearmarkequal <- function(X, r=NULL, ...) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") ## ensure distance information is present X <- as.lpp(X, sparse=FALSE) lev <- levels(marks(X)) v <- list() for(l in lev) v[[l]] <- linearmarkconnect(X, l, l, r=r, ...) result <- Reduce(addfuns, v) result <-rebadge.fv(result, quote(p[L](r)), new.fname=c("p", "L")) attr(result, "labl") <- attr(v[[1L]], "labl") return(result) } addfuns <- function(f1, f2) eval.fv(f1 + f2) linearmarkequal }) spatstat.linnet/R/First.R0000644000176200001440000000061614141460471015025 0ustar liggesusers## spatstat.linnet/R/First.R .onLoad <- function(...) 
reset.spatstat.options() .onAttach <- function(libname, pkgname) { vs <- read.dcf(file=system.file("DESCRIPTION", package="spatstat.linnet"), fields="Version") vs <- as.character(vs) putSpatstatVariable("SpatstatLinnetVersion", vs) packageStartupMessage(paste("spatstat.linnet", vs)) return(invisible(NULL)) } spatstat.linnet/R/nnfromvertex.R0000644000176200001440000000777414144334007016505 0ustar liggesusers#' nnfromvertex.R #' #' Nearest data point to each vertex of a network #' #' $Revision: 1.4 $ $Date: 2021/01/07 03:53:14 $ #' nnfromvertex <- function(X, what=c("dist", "which"), k=1) { stopifnot(is.lpp(X)) what <- match.arg(what, several.ok=TRUE) nX <- npoints(X) nv <- nvertices(domain(X)) #' k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) #' Initialise results nnd <- matrix(Inf, nrow=nv, ncol=kmax) nnw <- matrix(NA_integer_, nrow=nv, ncol=kmax) colnames(nnd) <- colnames(nnw) <- 1:kmax #' Trivial cases if(nX > 0) { #' Unique points, remembering original sequence ii <- which(!duplicated(X)) uX <- X[ii] #' local coordinates coUX <- coords(uX)[, c("seg", "tp")] #' add label from original sequence index coUX$lab <- ii #' reorder oo <- with(coUX, order(seg, tp)) coUXord <- coUX[oo, , drop=FALSE] seg <- coUXord$seg tp <- coUXord$tp #' network data L <- domain(X) nv <- nvertices(L) ns <- nsegments(L) seglen <- lengths_psp(as.psp(L)) from <- L$from to <- L$to #' upper bound on interpoint distance huge <- sum(seglen) #' numerical tolerance for nnwhich tol <- max(sqrt(.Machine$double.eps), diameter(Frame(L))/2^20) #' .............................................. 
#' number of neighbours that are well-defined kmaxcalc <- min(nX, kmax) #' calculate k-nn distances and identifiers for 1 <= k <= kmaxcalc z <- vnnFind(seg, tp, ns, nv, from, to, seglen, huge, tol, kmax=kmaxcalc) vnndist <- z$vnndist vnnwhich <- z$vnnwhich #' map identifiers back to original data pattern vnnwhich <- coUXord$lab[vnnwhich] #' insert results in correct places nnd[, 1:kmaxcalc] <- vnndist nnw[, 1:kmaxcalc] <- vnnwhich } #' extract required values nnd <- nnd[,k, drop=TRUE] nnw <- nnw[,k, drop=TRUE] if(identical(what, "dist")) return(nnd) if(identical(what, "which")) return(nnw) return(cbind(data.frame(dist=nnd), data.frame(which=nnw))) } vnnFind <- function(seg, tp, ns, nv, from, to, seglen, huge, tol, kmax=1) { #' Find data point nearest to each vertex of network #' Assumed 'seg' is sorted in increasing order #' 'tp' is increasing within 'seg' nX <- length(seg) from0 <- from - 1L to0 <- to - 1L seg0 <- seg - 1L #' if(kmax == 1) { z <- .C(SL_Clinvwhichdist, np = as.integer(nX), sp = as.integer(seg0), tp = as.double(tp), nv = as.integer(nv), ns = as.integer(ns), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglen), huge = as.double(huge), tol = as.double(tol), dist = as.double(numeric(nv)), which = as.integer(integer(nv)), PACKAGE="spatstat.linnet") } else { z <- .C(SL_linvknndist, kmax = as.integer(kmax), nq = as.integer(nX), sq = as.integer(seg0), tq = as.double(tp), nv = as.integer(nv), ns = as.integer(ns), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglen), huge = as.double(huge), tol = as.double(tol), dist = as.double(numeric(kmax * nv)), which = as.integer(integer(kmax * nv)), PACKAGE="spatstat.linnet") } vnndist <- z$dist vnnwhich <- z$which + 1L vnnwhich[vnnwhich == 0] <- NA # possible if network is disconnected if(kmax > 1) { vnndist <- matrix(vnndist, ncol=kmax, byrow=TRUE) vnnwhich <- matrix(vnnwhich, ncol=kmax, byrow=TRUE) } return(list(vnndist=vnndist, vnnwhich=vnnwhich)) } spatstat.linnet/R/Math.linim.R0000644000176200001440000000453214144334007015735 0ustar liggesusers## ## Math.linim.R ## ## $Revision: 1.10 $ $Date: 2021/07/01 01:56:18 $ ## Math.linim <- function(x, ...){ m <- do.call(.Generic, list(x[,,drop=FALSE], ...)) Z <- im(m, xcol = x$xcol, yrow = x$yrow, xrange = x$xrange, yrange = x$yrange, unitname = unitname(x)) df <- attr(x, "df") df$values <- do.call(.Generic, list(df$values, ...)) L <- attr(x, "L") rslt <- linim(L, Z, df=df, restrict=FALSE) return(rslt) } Summary.linim <- function(..., na.rm, finite){ if(missing(finite)) finite <- FALSE if(missing(na.rm)) na.rm <- FALSE argh <- list(...) 
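  ## Gather the pixel values of each image; except for "sum" and "prod",
  ## also include the exact sample values stored in the "df" attribute
  ## (these sharpen max/min/range/any/all, but would add extra terms to a
  ## sum or product).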
values <- lapply(argh, "[") dfvalues <- if(is.element(.Generic, c("sum", "prod"))) list() else lapply(lapply(argh, attr, which="df"), getElement, name="values") vals <- unlist(c(values, dfvalues)) logique <- is.element(.Generic, c("all", "any")) vals <- if(logique) as.logical(vals) else as.numeric(vals) if(finite && !logique) { vals <- vals[is.finite(vals)] } else if(na.rm) { vals <- vals[!is.na(vals)] } do.call(.Generic, list(vals)) } Complex.linim <- function(z){ L <- attr(z, "L") df <- attr(z, "df") m <- do.call(.Generic, list(z=z[,,drop=FALSE])) Z <- im(m, xcol = z$xcol, yrow = z$yrow, xrange = z$xrange, yrange = z$yrange, unitname = unitname(z)) df$values <- do.call(.Generic, list(z=df$values)) rslt <- linim(L, Z, df=df, restrict=FALSE) return(rslt) } ## This function defines what happens inside 'Ops.linim' ## The formal definition of 'Ops.linim' is now in 'Math.linimlist.R' LinimOp <- function(e1, e2=NULL, op) { ## operate on a linim or pair of linim with fallback to im if(is.null(e2)) { ## unary operation if(!is.element(op, c("!", "-", "+"))) stop(paste("Unary operation", sQuote(op), "is undefined for images"), call.=FALSE) expr <- parse(text = paste(op, "e1")) netted <- is.linim(e1) } else { expr <- parse(text = paste("e1", op, "e2")) net1 <- is.linim(e1) net2 <- is.linim(e2) no.force.im1 <- net1 || is.null(dim(e1)) no.force.im2 <- net2 || is.null(dim(e2)) netted <- (net1 || net2) && no.force.im1 && no.force.im2 } result <- if(netted) { do.call(eval.linim, list(expr=expr)) } else { do.call(eval.im, list(expr = expr)) } return(result) } spatstat.linnet/R/nndistlpp.R0000644000176200001440000005177414144334007015762 0ustar liggesusers# # nndistlpp.R # # $Revision: 1.28 $ $Date: 2021/01/07 03:53:21 $ # # Methods for nndist, nnwhich, nncross for linear networks # # nndist.lpp # Calculates the nearest neighbour distances in the shortest-path metric # for a point pattern on a linear network. 
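#
# Illustrative usage (a minimal sketch; assumes spatstat.linnet and its
# spatstat dependencies are attached, and uses the 'spiders' point pattern
# from spatstat.data, which is an 'lpp' object):
#
#   X  <- spatstat.data::spiders
#   d1 <- nndist(X)             # shortest-path distance to the nearest neighbour
#   d2 <- nndist(X, k = 1:2)    # matrix with columns 'dist.1' and 'dist.2'
#
# Distances are Inf for points with no neighbour in their connected component
# of the network.
#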
nndist.lpp <- function(X, ..., k=1, by=NULL, method="C") { stopifnot(inherits(X, "lpp")) stopifnot(method %in% c("C", "interpreted")) n <- npoints(X) k <- as.integer(k) stopifnot(all(k > 0)) kmax <- max(k) if(!is.null(by)) return(genericNNdistBy(X, by, k=k)) L <- as.linnet(X) if(is.null(br <- L$boundingradius) || is.infinite(br)) { # network may be disconnected lab <- connected(L, what="labels") if(length(levels(lab)) > 1L) { # network is disconnected result <- matrix(Inf, n, length(k)) # handle each connected component separately subsets <- split(seq_len(nvertices(L)), lab) for(i in seq_along(subsets)) { Xi <- thinNetwork(X, retainvertices=subsets[[i]]) relevant <- attr(Xi, "retainpoints") result[relevant, ] <- nndist.lpp(Xi, k=k, method=method) } return(result) } } toomany <- (kmax >= n-1) if(toomany) { ## not enough points to define kmax nearest neighbours result <- matrix(Inf, nrow=n, ncol=kmax) if(n <= 1) return(result[,k,drop=TRUE]) ## reduce kmax to feasible value kmax <- n-1 kuse <- k[k <= kmax] } else { kuse <- k } Y <- as.ppp(X) sparse <- identical(L$sparse, TRUE) ## find nearest segment for each point ## This is given by local coordinates, if available (spatstat >= 1.28-0) loco <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE) pro <- if(!is.null(seg <- loco$seg)) seg else nearestsegment(X, Lseg) if(method == "interpreted") { ## interpreted code D <- pairdist(X, method="interpreted") diag(D) <- Inf ans <- if(kmax == 1) apply(D, 1, min) else t(apply(D, 1, orderstats, k=kuse))[,,drop=TRUE] } else if(!sparse && kmax == 1) { # C code for non-sparse network Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to dpath <- L$dpath # convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L segmap <- pro - 1L nseg <- length(from0) # upper bound on interpoint distance huge <- max(dpath) + 2 * max(lengths_psp(Lseg)) # space for result ans <- double(n) # call C zz <- .C(SL_linnndist, np = as.integer(n), xp = as.double(Y$x), yp = as.double(Y$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), segmap = as.integer(segmap), huge = as.double(huge), answer = as.double(ans), PACKAGE="spatstat.linnet") ans <- zz$answer } else if(spatstat.options('Cnndistlpp')) { ## use new C routine Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to ## nseg <- length(from) seglen <- lengths_psp(Lseg) ## convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L segmap <- pro - 1L tp <- loco$tp ## sort by segment index oo <- order(segmap, tp) segmap <- segmap[oo] tp <- tp[oo] # upper bound on interpoint distance huge <- sum(seglen) #' numerical tolerance tol <- max(.Machine$double.eps, diameter(Frame(L))/2^20) #' kmax1 <- kmax + 1L zz <- .C(SL_linknnd, kmax = as.integer(kmax1), np = as.integer(n), sp = as.integer(segmap), tp = as.double(tp), nv = as.integer(Lvert$n), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglen), huge = as.double(huge), tol = as.double(tol), nndist = as.double(numeric(n * kmax1)), nnwhich = as.integer(integer(n * kmax1)), PACKAGE="spatstat.linnet") ans <- matrix(, n, kmax1) ans[oo, ] <- matrix(zz$nndist, n, kmax1, byrow=TRUE) # drop first column which is zero corresponding to j = i ans <- ans[, -1, drop=FALSE] colnames(ans) <- paste0("dist.", 1:ncol(ans)) ans <- ans[,kuse] } else { ## use fast code for nncross ans <- nncross(X, X, what="dist", k=kuse+1) if(is.matrix(ans) || 
is.data.frame(ans)) colnames(ans) <- paste0("dist.", kuse) } if(!is.null(dim(ans))) { ans <- as.matrix(ans) rownames(ans) <- NULL } if(!toomany) return(ans) result[, kuse] <- as.matrix(ans) colnames(result) <- paste0("dist.", 1:ncol(result)) return(result[,k]) } # nnwhich.lpp # Identifies the nearest neighbours in the shortest-path metric # for a point pattern on a linear network. # nnwhich.lpp <- function(X, ..., k=1, method="C") { stopifnot(inherits(X, "lpp")) stopifnot(method %in% c("C", "interpreted")) k <- as.integer(k) stopifnot(all(k > 0)) kmax <- max(k) n <- npoints(X) L <- as.linnet(X) if(is.null(br <- L$boundingradius) || is.infinite(br)) { # network may be disconnected lab <- connected(L, what="labels") if(length(levels(lab)) > 1L) { # network is disconnected result <- matrix(NA_integer_, n, length(k)) # handle each connected component separately subsets <- split(seq_len(nvertices(L)), lab) for(i in seq_along(subsets)) { Xi <- thinNetwork(X, retainvertices=subsets[[i]]) relevant <- attr(Xi, "retainpoints") result[relevant, ] <- nnwhich.lpp(Xi, k=k, method=method) } return(result) } } toomany <- (kmax >= n-1) if(toomany) { ## not enough points to define kmax nearest neighbours result <- matrix(NA_integer_, nrow=n, ncol=kmax) if(n <= 1) return(result[,k,drop=TRUE]) ## reduce kmax to feasible value kmax <- n-1 kuse <- k[k <= kmax] } else { kuse <- k } # Y <- as.ppp(X) sparse <- identical(L$sparse, TRUE) ## find nearest segment for each point ## This is given by local coordinates, if available (spatstat >= 1.28-0) loco <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE) pro <- if(!is.null(seg <- loco$seg)) seg else nearestsegment(X, Lseg) if(method == "interpreted") { D <- pairdist(X, method="interpreted") diag(D) <- Inf nnw <- if(kmax == 1) apply(D, 1, which.min) else t(apply(D, 1, orderwhich, k=kuse))[,,drop=TRUE] } else if(!sparse && kmax == 1) { # C code for non-sparse network ## Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to dpath <- L$dpath ## convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L segmap <- pro - 1L nseg <- length(from0) # upper bound on interpoint distance huge <- max(dpath) + 2 * max(lengths_psp(Lseg)) # space for result nnd <- double(n) nnw <- integer(n) # call C zz <- .C(SL_linnnwhich, np = as.integer(n), xp = as.double(Y$x), yp = as.double(Y$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), segmap = as.integer(segmap), huge = as.double(huge), nndist = as.double(nnd), nnwhich = as.integer(nnw), PACKAGE="spatstat.linnet") # convert C indexing to R indexing nnw <- zz$nnwhich + 1L # any zeroes occur if points have no neighbours. 
nnw[nnw == 0] <- NA } else if(spatstat.options('Cnndistlpp')) { ## use new C routine Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to ## nseg <- length(from) seglen <- lengths_psp(Lseg) ## convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L segmap <- pro - 1L tp <- loco$tp ## sort by segment index oo <- order(segmap, tp) segmap <- segmap[oo] tp <- tp[oo] # upper bound on interpoint distance huge <- sum(seglen) #' numerical tolerance tol <- max(.Machine$double.eps, diameter(Frame(L))/2^20) #' kmax1 <- kmax + 1L zz <- .C(SL_linknnd, kmax = as.integer(kmax1), np = as.integer(n), sp = as.integer(segmap), tp = as.double(tp), nv = as.integer(Lvert$n), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglen), huge = as.double(huge), tol = as.double(tol), nndist = as.double(numeric(n * kmax1)), nnwhich = as.integer(integer(n * kmax1)), PACKAGE="spatstat.linnet") nnw <- matrix(, n, kmax1) nnw[oo, ] <- matrix(oo[zz$nnwhich + 1L], n, kmax1, byrow=TRUE) # drop first column which is j = i nnw <- nnw[, -1, drop=FALSE] colnames(nnw) <- paste0("which.", 1:ncol(nnw)) nnw <- nnw[,kuse] } else { ## use fast code for nncross nnw <- nncross(X, X, what="which", k=kuse+1) if(is.matrix(nnw) || is.data.frame(nnw)) colnames(nnw) <- paste0("which.", kuse) } if(!is.null(dim(nnw))) { nnw <- as.matrix(nnw) rownames(nnw) <- NULL } if(!toomany) return(nnw) result[, kuse] <- as.matrix(nnw) colnames(result) <- paste0("which.", 1:ncol(result)) return(result[,k]) } # nncross.lpp # Identifies the nearest neighbours in the shortest-path metric # from one point pattern on a linear network to ANOTHER pattern # on the SAME network. # nncross.lpp <- local({ nncross.lpp <- function(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), ..., k=1, method="C") { stopifnot(inherits(X, "lpp")) stopifnot(inherits(Y, "lpp")) what <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE) stopifnot(method %in% c("C", "interpreted")) if(is.null(iX) != is.null(iY)) stop("If one of iX, iY is given, then both must be given") exclude <- (!is.null(iX) || !is.null(iY)) check <- resolve.defaults(list(...), list(check=TRUE))$check if(check && !identical(as.linnet(X, sparse=TRUE), as.linnet(Y, sparse=TRUE))) stop("X and Y are on different linear networks") # internal use only format <- resolve.defaults(list(...), list(format="data.frame"))$format nX <- npoints(X) nY <- npoints(Y) L <- domain(X) if(is.null(br <- L$boundingradius) || is.infinite(br)) { # network may be disconnected lab <- connected(L, what="labels") if(length(levels(lab)) > 1L) { # network is disconnected # handle each connected component separately subsets <- split(seq_len(nvertices(L)), lab) nndistmat <- if("dist" %in% what) matrix(Inf, nX, length(k)) else NULL nnwhichmat <- if("which" %in% what) matrix(NA_integer_, nX, length(k)) else NULL for(i in seq_along(subsets)) { subi <- subsets[[i]] Xi <- thinNetwork(X, retainvertices=subi) useX <- attr(Xi, "retainpoints") Yi <- thinNetwork(Y, retainvertices=subi) useY <- attr(Yi, "retainpoints") z <- nncross.lpp(Xi, Yi, iX = iX[useX], iY=iY[useY], what=what, k=k, method=method, format="list") if("dist" %in% what) nndistmat[useX, ] <- z$dist if("which" %in% what) nnwhichmat[useX, ] <- which(useY)[z$which] } return(shapedresult(dist=nndistmat, which=nnwhichmat, what=what, format=format)) } } koriginal <- k <- as.integer(k) stopifnot(all(k > 0)) kmax <- max(k) #' decide which algorithm to use #' fast C algorithm fast <- (method == "C") && (spatstat.options("Cnncrosslpp") || 
(kmax > 1)) #' slower C algorithm for exclusion case for k=1 excludeinC <- exclude && (method == "C") && !fast && (k == 1) excludeinR <- exclude && !excludeinC if(excludeinR) { #' compute k+1 neighbours in C, then filter in R kmax <- kmax+1 k <- 1:kmax } toomany <- (kmax > nY) if(toomany) { paddist <- matrix(Inf, nX, kmax) padwhich <- matrix(NA_integer_, nX, kmax) kmax <- nY kuse <- k[k <= kmax] } else { kuse <- k } if(length(kuse) == 0) { # None of the required values are defined nnd <- paddist nnw <- padwhich maxk <- max(k) colnames(nnd) <- paste0("dist.", seq_len(maxk)) colnames(nnd) <- paste0("dist.", seq_len(maxk)) nnd <- nnd[,k,drop=TRUE] nnw <- nnw[,k,drop=TRUE] return(shapedresult(dist=nnd, which=nnw, what=what, format=format)) } need.dist <- ("dist" %in% what) || excludeinR need.which <- ("which" %in% what) || excludeinR if(!fast) { ## require dpath matrix Xsparse <- identical(domain(X)$sparse, TRUE) Ysparse <- identical(domain(Y)$sparse, TRUE) L <- if(!Xsparse && Ysparse) as.linnet(X) else if(Xsparse && !Ysparse) as.linnet(Y) else as.linnet(X, sparse=FALSE) } else L <- as.linnet(X) # nX <- npoints(X) nY <- npoints(Y) P <- as.ppp(X) Q <- as.ppp(Y) # Lvert <- L$vertices from <- L$from to <- L$to if(fast) { seglengths <- lengths_psp(as.psp(L)) } else { dpath <- L$dpath } # deal with null cases if(nX == 0) return(shapedresult(dist=numeric(0), which=integer(0), what=what, format=format)) if(nY == 0) return(shapedresult(dist=rep(Inf, nX), which=rep(NA_integer_, nX), what=what, format=format)) # find nearest segment for each point Xcoords <- coords(X) Ycoords <- coords(Y) Xpro <- Xcoords$seg Ypro <- Ycoords$seg # handle serial numbers if(exclude) { stopifnot(is.integer(iX) && is.integer(iY)) if(length(iX) != nX) stop("length of iX does not match the number of points in X") if(length(iY) != nY) stop("length of iY does not match the number of points in Y") } if(method == "interpreted") { ## interpreted code D <- crossdist(X, Y, method="interpreted") if(exclude) D[outer(iX, iY, "==")] <- Inf nnd <- nnw <- NULL if(need.dist) { nnd <- if(kmax == 1) apply(D, 1, min) else t(apply(D, 1, orderstats, k=kuse))[,,drop=TRUE] } if(need.which) { nnw <- if(kmax == 1) apply(D, 1, which.min) else t(apply(D, 1, orderwhich, k=kuse))[,,drop=TRUE] } } else { ## C code ## convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L nseg <- length(from0) Xsegmap <- Xpro - 1L Ysegmap <- Ypro - 1L ## upper bound on interpoint distance huge <- if(!fast) { max(dpath) + 2 * diameter(Frame(L)) } else { sum(seglengths) } ## space for result nnd <- double(nX * kmax) nnw <- integer(nX * kmax) ## call C if(fast) { ## experimental faster code ooX <- order(Xsegmap) ooY <- order(Ysegmap) tol <- max(.Machine$double.eps, diameter(Frame(L))/2^20) if(kmax > 1) { zz <- .C(SL_linknncross, kmax = as.integer(kmax), np = as.integer(nX), sp = as.integer(Xsegmap[ooX]), tp = as.double(Xcoords$tp[ooX]), nq = as.integer(nY), sq = as.integer(Ysegmap[ooY]), tq = as.double(Ycoords$tp[ooY]), nv = as.integer(Lvert$n), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglengths), huge = as.double(huge), tol = as.double(tol), nndist = as.double(nnd), nnwhich = as.integer(nnw), PACKAGE="spatstat.linnet") zznd <- matrix(zz$nndist, ncol=kmax, byrow=TRUE) zznw <- matrix(zz$nnwhich + 1L, ncol=kmax, byrow=TRUE) if(any(notfound <- (zznw == 0))) { zznd[notfound] <- NA zznw[notfound] <- NA } nnd <- matrix(nnd, nX, kmax) nnw <- matrix(nnw, nX, kmax) nnd[ooX, ] <- zznd nnw[ooX, ] <- ooY[zznw] colnames(nnd) <- 
colnames(nnw) <- seq_len(kmax) if(!identical(kuse, seq_len(kmax))) { nnd <- nnd[,kuse,drop=FALSE] nnw <- nnw[,kuse,drop=FALSE] if(length(kuse) == 1) { colnames(nnd) <- paste0("dist.", kuse) colnames(nnw) <- paste0("which.", kuse) } } } else { zz <- .C(SL_linSnndwhich, np = as.integer(nX), sp = as.integer(Xsegmap[ooX]), tp = as.double(Xcoords$tp[ooX]), nq = as.integer(nY), sq = as.integer(Ysegmap[ooY]), tq = as.double(Ycoords$tp[ooY]), nv = as.integer(Lvert$n), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglengths), huge = as.double(huge), tol = as.double(tol), nndist = as.double(nnd), nnwhich = as.integer(nnw), PACKAGE="spatstat.linnet") zznd <- zz$nndist zznw <- zz$nnwhich + 1L if(any(notfound <- (zznw == 0))) { zznd[notfound] <- NA zznw[notfound] <- NA } nnd[ooX] <- zznd nnw[ooX] <- ooY[zznw] } } else { ## slower code requiring dpath matrix if(!excludeinC) { zz <- .C(SL_linndcross, np = as.integer(nX), xp = as.double(P$x), yp = as.double(P$y), nq = as.integer(nY), xq = as.double(Q$x), yq = as.double(Q$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), psegmap = as.integer(Xsegmap), qsegmap = as.integer(Ysegmap), huge = as.double(huge), nndist = as.double(nnd), nnwhich = as.integer(nnw), PACKAGE="spatstat.linnet") nnd <- zz$nndist nnw <- zz$nnwhich + 1L } else { ## excluding certain pairs (k=1) zz <- .C(SL_linndxcross, np = as.integer(nX), xp = as.double(P$x), yp = as.double(P$y), nq = as.integer(nY), xq = as.double(Q$x), yq = as.double(Q$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), psegmap = as.integer(Xsegmap), qsegmap = as.integer(Ysegmap), idP = as.integer(iX), idQ = as.integer(iY), huge = as.double(huge), nndist = as.double(nnd), nnwhich = as.integer(nnw), PACKAGE="spatstat.linnet") nnd <- zz$nndist nnw <- zz$nnwhich + 1L } ## any zeroes occur if points have no neighbours. nnw[nnw == 0] <- NA } } if(toomany) { ## Nearest neighbours were undefined for some large values of k. 
## Insert results obtained for valid 'k' back into matrix of NA/Inf if(need.dist) { paddist[,kuse] <- as.matrix(nnd) nnd <- paddist } if(need.which) { padwhich[,kuse] <- as.matrix(nnw) nnw <- padwhich } } if(excludeinR) { ## now find neighbours that don't have the same id number if(!is.matrix(nnw)) nnw <- as.matrix(nnw, ncol=1) if(!is.matrix(nnd)) nnd <- as.matrix(nnd, ncol=1) avoid <- matrix(iX[as.vector(row(nnw))] != iY[as.vector(nnw)], nrow=nrow(nnw), ncol=ncol(nnw)) colind <- apply(avoid, 1, whichcoltrue, m=seq_len(ncol(avoid)-1)) colind <- if(is.matrix(colind)) t(colind) else matrix(colind, ncol=1) rowcol <- cbind(as.vector(row(colind)), as.vector(colind)) nnd <- matrix(nnd[rowcol], nrow=nX) nnw <- matrix(nnw[rowcol], nrow=nX) nnd <- nnd[,koriginal] nnw <- nnw[,koriginal] } return(shapedresult(dist=nnd, which=nnw, what=what, format=format)) } whichcoltrue <- function(x, m) which(x)[m] shapedresult <- function(dist, which, what=c("dist", "which"), format="data.frame") { #' idiom to return result in correct format result <- list(dist=dist, which=which)[what] if(format == "data.frame") result <- as.data.frame(result)[,,drop=TRUE] return(result) } nncross.lpp }) spatstat.linnet/R/relrisk.lpp.R0000644000176200001440000005566614144334007016220 0ustar liggesusers# # relrisk.lpp.R # # Estimation of relative risk on network # # $Revision: 1.6 $ $Date: 2020/04/27 03:08:26 $ # relrisk.lpp <- local({ relrisk.lpp <- function(X, sigma, ..., at=c("pixels", "points"), relative=FALSE, adjust=1, casecontrol=TRUE, control=1, case, finespacing=FALSE) { stopifnot(is.lpp(X)) stopifnot(is.multitype(X)) control.given <- !missing(control) case.given <- !missing(case) at <- match.arg(at) ## marx <- marks(X) types <- levels(marx) ntypes <- length(types) ## if(ntypes == 1L) stop("Data contains only one type of points") casecontrol <- casecontrol && (ntypes == 2L) if((control.given || case.given) && !(casecontrol || relative)) { aa <- c("control", "case")[c(control.given, case.given)] nn <- length(aa) warning(paste(ngettext(nn, "Argument", "Arguments"), paste(sQuote(aa), collapse=" and "), ngettext(nn, "was", "were"), "ignored, because relative=FALSE and", if(ntypes==2L) "casecontrol=FALSE" else "there are more than 2 types of points")) } ## compute bandwidth if(is.function(sigma)) { sigma <- do.call.matched(sigma, list(X=X, ...)) if(!is.numeric(sigma)) stop("The function 'sigma' did not return a numerical value", call.=FALSE) } check.1.real(sigma) # includes Inf sigma <- adjust * as.numeric(sigma) ## ......................................... ## compute intensity estimates for each type ## ......................................... Y <- split(X) switch(at, pixels = { ## intensity estimates of each type Deach <- solapply(Y, density.lpp, sigma=sigma, ..., finespacing=finespacing) ## compute intensity estimate for unmarked pattern Dall <- density(unmark(X), sigma=sigma, ..., finespacing=finespacing) }, points = { ## intensity estimates of each type **at each data point** Deachfun <- solapply(Y, densityfun.lpp, sigma=sigma, ..., finespacing=finespacing) Deach <- as.data.frame(sapply(Deachfun, function(f, P) f(P), P=X)) ## leave-one-out estimates Dself <- lapply(Y, density.lpp, sigma=sigma, at="points", leaveoneout=TRUE, ..., finespacing=finespacing) ## insert leave-one-out estimates in correct place Deachsplit <- split(Deach, marx) for(j in 1:ntypes) { Deachsplit[[j]][, j] <- Dself[[j]] } split(Deach, marx) <- Deachsplit ## total Dall <- rowSums(Deach) }) ## ......................................... 
## compute probabilities/risks ## ......................................... if(ntypes == 2 && casecontrol) { if(control.given || !case.given) { stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:2) } else if(is.character(control)) { icontrol <- match(control, types) if(is.na(icontrol)) stop(paste("No points have mark =", control)) } else stop(paste("Unrecognised format for argument", sQuote("control"))) if(!case.given) icase <- 3 - icontrol } if(case.given) { stopifnot(length(case) == 1) if(is.numeric(case)) { icase <- case <- as.integer(case) stopifnot(case %in% 1:2) } else if(is.character(case)) { icase <- match(case, types) if(is.na(icase)) stop(paste("No points have mark =", case)) } else stop(paste("Unrecognised format for argument", sQuote("case"))) if(!control.given) icontrol <- 3 - icase } ## compute ...... switch(at, pixels = { ## compute probability of case pcase <- Deach[[icase]]/Dall ## correct small numerical errors pcase <- clamp01(pcase) ## trap NaN values nbg <- badvalues(pcase) if(any(nbg)) { ## apply l'Hopital's rule: ## p(case) = 1{nearest neighbour is case} distcase <- as.linim(distfun(Y[[icase]])) distcontrol <- as.linim(distfun(Y[[icontrol]])) closecase <- eval.linim(as.integer(distcase < distcontrol)) pcase[nbg] <- closecase[nbg] } if(!relative) { result <- pcase } else { result <- eval.im(ifelse(pcase < 1, pcase/(1-pcase), NA)) } }, points={ ## compute probability of case pcase <- Deach[,icase]/Dall ## correct small numerical errors pcase <- clamp01(pcase) ## trap NaN values if(any(nbg <- badvalues(pcase))) { ## apply l'Hopital's rule imarks <- as.integer(marx) nntype <- imarks[nnwhich(X)] pcase[nbg] <- as.integer(nntype[nbg] == icase) } if(!relative) { result <- pcase } else { result <- ifelse(pcase < 1, pcase/(1-pcase), NA) } }) } else { ## several types if(relative) { ## need 'control' type stopifnot(length(control) == 1) if(is.numeric(control)) { icontrol <- control <- as.integer(control) stopifnot(control %in% 1:ntypes) } else if(is.character(control)) { icontrol <- match(control, types) if(is.na(icontrol)) stop(paste("No points have mark =", control)) } else stop(paste("Unrecognised format for argument", sQuote("control"))) } switch(at, pixels={ probs <- as.solist(lapply(Deach, "/", e2=Dall)) ## correct small numerical errors probs <- as.solist(lapply(probs, clamp01)) ## trap NaN values nbg <- lapply(probs, badvalues) nbg <- Reduce("|", nbg) if(any(nbg)) { ## apply l'Hopital's rule whichnn <- as.linim(nnfun(X)) imarks <- as.integer(marx) typenn <- eval.im(imarks[whichnn]) typennsub <- typenn[nbg] for(k in seq_along(probs)) probs[[k]][nbg] <- (typennsub == k) } if(!relative) { result <- probs } else { result <- solapply(probs, divideifpositive, d = probs[[icontrol]]) } }, points = { probs <- Deach/Dall ## correct small numerical errors probs <- clamp01(probs) ## trap NaN values bad <- badvalues(probs) badrow <- matrowany(bad) if(any(badrow)) { ## apply l'Hopital's rule imarks <- as.integer(marx) typenn <- imarks[nnwhich(X)] probs[badrow, ] <- (typenn == col(result))[badrow, ] } if(!relative) { result <- probs } else { result <- probs/probs[,icontrol] } }) } attr(result, "sigma") <- sigma return(result) } clamp01 <- function(x) { if(is.linim(x)) return(eval.linim(pmin(pmax(x, 0), 1))) if(is.im(x)) return(eval.im(pmin(pmax(x, 0), 1))) if(is.data.frame(x)) x <- as.matrix(x) return(pmin(pmax(x, 0), 1)) } badvalues <- function(x) { if(is.linim(x)) return(eval.linim(!is.finite(x))) 
if(is.im(x)) return(eval.im(!is.finite(x))) if(is.data.frame(x)) x <- as.matrix(x) return(!(is.finite(x) | is.na(x))) } divideifpositive <- function(z, d) { eval.linim(ifelse(d > 0, z/d, NA)) } relrisk.lpp }) bw.relrisklpp <- local({ hargnames <- c("hmin", "hmax") bw.relrisklpp <- function(X, ..., method=c("likelihood", "leastsquares", "KelsallDiggle", "McSwiggan"), distance=c("path", "euclidean"), hmin=NULL, hmax=NULL, nh=NULL, fast=TRUE, fastmethod="onestep", floored=TRUE, reference=c("thumb", "uniform", "sigma"), allow.infinite=TRUE, epsilon=1e-20, fudge=0, verbose=FALSE, warn=TRUE) { startTime <- proc.time() stopifnot(is.lpp(X)) stopifnot(is.multitype(X)) method <- match.arg(method) reference <- match.arg(reference) distance <- match.arg(distance) if(is.null(nh)) nh <- switch(distance, path=256, euclidean=16) ## validate X marx <- marks(X) types <- levels(marx) ntypes <- length(types) if(ntypes == 1L) stop("There is only one type of point", call.=FALSE) if(ntypes > 2L && distance == "path") stop(paste("Sorry, bw.relrisklpp(distance='path') is not yet supported", "for > 2 types of points"), call.=FALSE) ## determine range of bandwidths if(got.hmax <- !missing(hmax)) { check.1.real(hmax) ; stopifnot(hmax > 0) } if(got.hmin <- !missing(hmin)) { check.1.real(hmin) ; stopifnot(hmin > 0) } if(got.hmax && got.hmin) { stopifnot(hmin < hmax) } else if(got.hmax) { hmin <- hmax/20 } else if(got.hmin) { hmax <- hmin * 20 } else { ss <- bw.scott.iso(X) dd <- diameter(Frame(X)) srange <- range(c(ss/10, ss*5, dd/5)) hmin <- srange[1L] hmax <- srange[2L] } if(verbose) splat("Bandwidth range:", prange(c(hmin, hmax))) ## if(distance == "euclidean") { if(verbose) splat("Euclidean smoothing") if(method %in% c("McSwiggan", "KelsallDiggle")) stop(paste0("Sorry, bw.relrisklpp(method=", sQuote(method), ") is not yet supported for > 2 types of points"), call.=FALSE) sigmavalues <- seq(hmin, hmax, length.out=nh) cv <- numeric(nh) witch <- cbind(seq_along(marx), as.integer(marx)) pstate <- list() if(verbose) cat(paste("Processing", nh, "values of bandwidth ...")) for(i in 1:nh) { si <- sigmavalues[i] p <- relrisk(X, si, at="points", distance="euclidean", casecontrol=FALSE) pobs <- p[witch] cv[i] <- switch(method, likelihood=log(prod(pobs)), leastsquares=sum((1-pobs)^2)) if(verbose) pstate <- progressreport(i, nh, state=pstate) } result <- switch(method, likelihood = bw.optim(cv, sigmavalues, optimum="max", hname="sigma", cvname="logL", criterion="likelihood cross-validation", hargnames=hargnames, unitname=unitname(X)), leastsquares = bw.optim(cv, sigmavalues, hname="sigma", cvname="psq", criterion="least squares cross-validation", hargnames=hargnames, unitname=unitname(X))) return(result) } ## ---------- heat kernel (distance='path') ------------------------------ sigma <- hmax nsigma <- ceiling(nh * hmax/(hmax-hmin)) #' if(verbose) splat("Setting up network data...") L <- domain(X) TOTLEN <- volume(L) g <- densityfun.lpp(X=unmark(X), sigma=sigma, nsigma=nsigma, exit="setup", verbose=FALSE, ...) 
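  #' (The exit="setup" call above supplies the internal pieces used below:
  #'  the refined network, the mapping of each data point to the two endpoints
  #'  of its lixel with the corresponding weights, and the matrix that advances
  #'  the discretised heat equation by one time step.)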
#' extract internal data finenet <- g$linnet_obj lixelmap <- g$lixelmap lixelweight <- g$lixelweight Amatrix <- g$Amatrix ## U0 <- g$U0 # not used deltax <- g$deltax deltat <- g$deltat #' if(allow.infinite) { df <- as.data.frame(vertices(finenet))[,c("x","y","segcoarse","tpcoarse")] colnames(df) <- c("x", "y", "seg", "tp") fineverticescoarsenet <- lpp(df, L) } ## split into types Y <- split(X) X1 <- Y[[1L]] X2 <- Y[[2L]] n1 <- npoints(X1) n2 <- npoints(X2) #' discretise X1, X2 separately #' Each data point is mapped to two endpoints of a tiny segment I1 <- (as.integer(marx) == 1L) lixelweight1 <- lixelweight[I1] lixelmap1 <- lixelmap[I1] U01 <- tapplysum(lixelweight1, list(lixelmap1)) I2 <- !I1 lixelweight2 <- lixelweight[I2] lixelmap2 <- lixelmap[I2] U02 <- tapplysum(lixelweight2, list(lixelmap2)) #' determine number of time steps niter <- round((sigma^2)/(2 * deltat)) nsample <- length(U01) #' solve heat equation separately for X1 and X2 if(verbose) splat("Computing intensity estimates", "for", nsigma, "out of", niter, "bandwidth values at", nsample, "sample locations ...") K1 <- K2 <- matrix(0, nsample, nsigma) U1 <- U01 U2 <- U02 blocksize <- ceiling(niter/nsigma) pstate <- list() for(isave in 1:nsigma) { nit <- min(blocksize, niter - (isave-1L)*blocksize) if(nit > 0) { for(iter in 1:nit) { U1 <- as.numeric(Amatrix %*% U1) U2 <- as.numeric(Amatrix %*% U2) } if(verbose) pstate <- progressreport(isave, nsigma, state=pstate) } K1[, isave] <- U1 K2[, isave] <- U2 } if(verbose) splat("Done.") #' add small amount to log intensity logK1 <- log(K1 + epsilon) logK2 <- log(K2 + epsilon) logK1 <- t(logK1) logK2 <- t(logK2) #' Map each data point to closest endpoint J1 <- closeroftwo(lixelweight1, lixelmap1) J2 <- closeroftwo(lixelweight2, lixelmap2) #' Term ghat from Term 2 - intensity of Type 2 events at Type 1 locations ghat <- K2[J1, ] + epsilon ghat <- t(ghat) #' Term fhat from Term 3 - intensity of Type 1 events at Type 2 locations fhat <- K1[J2,] + epsilon fhat <- t(fhat) #' For Term 2 calculate f^(-i)_{h_1}(x_i) #' = intensity at type 1 event x_i estimated from all type 1 events except x_i #' Likewise g^(-j)_{h_1}(y_j) #' = intensity at type 2 event y_j estimated from all type 2 events except y_j if(verbose) splat("Computing leave-one-out estimates at data points.") if(verbose) cat("Type 1 ...") fminusi <- densitypointsLPP(X1, sigma, dx=deltax, dt=deltat, nsigma=nsigma, leaveoneout=TRUE, fast=fast, fastmethod=fastmethod, floored=floored) if(verbose) cat(" Done.\nType 2 ...") gminusj <- densitypointsLPP(X2, sigma, dx=deltax, dt=deltat, nsigma=nsigma, leaveoneout=TRUE, fast=fast, fastmethod=fastmethod, floored=floored) fminusi <- t(fminusi) gminusj <- t(gminusj) tau <- attr(gminusj, "sigma") use <- (hmin <= tau) & (tau <= hmax) if(verbose) splat("Done.") #' reference intensity (used in McSwiggan (modified K-D) method) switch(reference, sigma = { #' Use largest value of sigma #' reference intensity of type 1 process fbar <- K1[,nsigma] + epsilon #' reference intensity of type 2 process gbar <- K2[, nsigma] + epsilon #' leave-one-out estimates at data points fbarminusi <- fminusi[nsigma, ] gbarminusj <- gminusj[nsigma, ] }, thumb = { #' Use smoothers selected by rule of thumb b1 <- bw.scott.iso(X1) b2 <- bw.scott.iso(X2) i1 <- which.min(abs(b1 - tau)) i2 <- which.min(abs(b2 - tau)) #' reference intensity of type 1 process fbar <- K1[,i1] + epsilon #' reference intensity of type 2 process gbar <- K2[,i2] + epsilon #' leave-one-out estimates at data points fbarminusi <- fminusi[i1, ] gbarminusj 
<- gminusj[i2, ] }, uniform = { #' Use uniform intensity fbar <- rep.int(n1/TOTLEN, nrow(K1)) gbar <- rep.int(n2/TOTLEN, nrow(K2)) #' leave-one-out estimates at data points fbarminusi <- rep.int((n1-1)/TOTLEN, n1) gbarminusj <- rep.int((n2-1)/TOTLEN, n2) }) #' reference intensity of type 1 process at type 1 points #' fbari <- fbar[J1] # not used #' reference intensity of type 2 process at type 2 points #' gbarj <- gbar[J2] # not used #' Avoid very small estimates if(fudge > 0) { minloo <- fudge/TOTLEN gminusj[] <- pmax(minloo, gminusj[]) fminusi[] <- pmax(minloo, fminusi[]) gbarminusj[] <- pmax(minloo, gbarminusj[]) fbarminusi[] <- pmax(minloo, fbarminusi[]) } else { gminusj <- gminusj + epsilon fminusi <- fminusi + epsilon gbarminusj <- gbarminusj + epsilon fbarminusi <- fbarminusi + epsilon } if(allow.infinite) { ## also compute values for sigma = Inf ## corresponding to a constant relative risk if(verbose) splat("Computing estimates for sigma=Inf ...") fInfFun <- densityfun(X1, Inf) gInfFun <- densityfun(X2, Inf) fhatInf <- fInfFun(X2) # intensity of X1 at points of X2 ghatInf <- gInfFun(X1) # intensity of X2 at points of X1 K1Inf <- fInfFun(fineverticescoarsenet) # intensity of X1 at fine grid K2Inf <- gInfFun(fineverticescoarsenet) # intensity of X2 at fine grid #' intensity of X1 at points of X1, leave-one-out fminusiInf <- densitypointsLPP(X1, Inf, leaveoneout=TRUE) gminusjInf <- densitypointsLPP(X2, Inf, leaveoneout=TRUE) #' ensure they are row vectors fhatInf <- matrix(fhatInf[], nrow=1) ghatInf <- matrix(ghatInf[], nrow=1) fminusiInf <- matrix(fminusiInf[], nrow=1) gminusjInf <- matrix(gminusjInf[], nrow=1) K1Inf <- matrix(K1Inf[], nrow=1) K2Inf <- matrix(K2Inf[], nrow=1) logK1Inf <- log(K1Inf + epsilon) logK2Inf <- log(K2Inf + epsilon) } #' Compute terms in cross-validation score if(verbose) splat("Computing basic cross-validation terms ...") Term1 <- deltax * xvalterm1(logK1,logK2) Term1Inf <- deltax * xvalterm1(logK1Inf,logK2Inf) switch(method, KelsallDiggle = { #' ........... original Kelsall-Diggle criterion ................. if(verbose) splat("Computing Kelsall-Diggle criterion ...") Term2 <- (-2) * xvalterm2(fminusi, ghat) Term3 <- (-2) * xvalterm2(gminusj, fhat) ## Term3 <- t(Term3) CKD <- -Term1 + Term2 + Term3 #' repeat for sigma=Inf Term2Inf <- (-2) * xvalterm2(fminusiInf, ghatInf) Term3Inf <- (-2) * xvalterm2(gminusjInf, fhatInf) ## Term3Inf <- t(Term3Inf) Cinf <- -Term1Inf + Term2Inf + Term3Inf ## CKDout <- c(CKD[use], Cinf) tauout <- c(tau[use], Inf) result <- bw.optim(CKDout, tauout, hname="sigma", cvname="C", criterion="Kelsall-Diggle cross-validation", hargnames=hargnames, unitname=unitname(X)) }, McSwiggan = { #' .............. modified criterion ..................... 
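          #' (A weighted variant of the Kelsall-Diggle criterion above: the
          #'  leave-one-out terms are weighted by 1/fbar and 1/gbar, the
          #'  reference intensities at the data points, and an extra integral
          #'  term involving log(fbar/gbar) is added.)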
if(verbose) splat("Computing modified Kelsall-Diggle criterion ...") ModTerm2 <- -2 * xvalterm4(fminusi, ghat, 1/fbarminusi) ModTerm3 <- -2 * xvalterm4(gminusj, fhat, 1/gbarminusj) Term4 <- -2 * deltax * xvalterm4(t(K1), t(K2), log(fbar/gbar)) Term4[!is.finite(Term4)] <- Inf modC <- Term1 + ModTerm2 + ModTerm3 + Term4 ## again for sigma=Inf ModTerm2Inf <- -2 * xvalterm4(fminusiInf, ghatInf, 1/fbarminusi) ModTerm3Inf <- -2 * xvalterm4(gminusjInf, fhatInf, 1/gbarminusj) Term4Inf <- -2 * deltax * xvalterm4(K1Inf, K2Inf, log(fbar/gbar)) Term4Inf[!is.finite(Term4Inf)] <- Inf Cinf <- Term1Inf + ModTerm2Inf + ModTerm3Inf + Term4Inf modCout <- c(modC[use], Cinf) tauout <- c(tau[use], Inf) result <- bw.optim(modCout, tauout, hname="sigma", cvname="Cmod", criterion="McSwiggan modified Kelsall-Diggle cross-validation", hargnames=hargnames, unitname=unitname(X)) }, likelihood = { #' .............. likelihood criterion ..................... if(verbose) splat("Computing likelihood criterion ...") TermA <- xvalterm5(fminusi, ghat) TermB <- xvalterm5(gminusj, fhat) loglik <- TermA + TermB ## TermAInf <- xvalterm5(fminusiInf, ghatInf) TermBInf <- xvalterm5(gminusjInf, fhatInf) loglikInf <- TermAInf + TermBInf ## loglikout <- c(loglik[use], loglikInf) tauout <- c(tau[use], Inf) # as.numeric(loglikInf) result <- bw.optim(loglikout, tauout, optimum="max", hname="sigma", cvname="logL", criterion="likelihood cross-validation", hargnames=hargnames, unitname=unitname(X)) }, leastsquares = { #' .............. least squares criterion ..................... if(verbose) splat("Computing least squares criterion ...") Term6A <- xvalterm6(fminusi, ghat) Term6B <- xvalterm6(gminusj, fhat) sqprob <- Term6A + Term6B #' Term6AInf <- xvalterm6(fminusiInf, ghatInf) Term6BInf <- xvalterm6(gminusjInf, fhatInf) sqprobInf <- Term6AInf + Term6BInf ## sqprobout <- c(sqprob[use], sqprobInf) tauout <- c(tau[use], Inf) result <- bw.optim(sqprobout, tauout, hname="sigma", cvname="psq", criterion="least squares cross-validation", hargnames=hargnames, unitname=unitname(X)) }) if(verbose) splat("Done.") result <- timed(result, starttime=startTime) return(result) } closeroftwo <- function(ww, ff) { even <- c(FALSE,TRUE) odd <- c(TRUE, FALSE) as.integer(ifelse(ww[even] > ww[odd], ff[even], ff[odd])) } xvalterm1 <- function(x, y) { rowSums((x-y)^2) } xvalterm2 <- function(x, y) { rowSums((log(x/y))/x) } xvalterm4 <- function(x, y, w) { as.numeric(log(x/y) %*% w) } xvalterm5 <- function(x, y) { rowSums(log(x/(x+y))) } xvalterm6 <- function(x, y) { rowSums((1 - x/(x+y))^2) } bw.relrisklpp }) spatstat.linnet/R/linequad.R0000644000176200001440000002256314144334007015543 0ustar liggesusers# # linequad.R # # $Revision: 1.16 $ $Date: 2021/01/07 03:53:56 $ # # create quadscheme for a pattern of points lying *on* line segments linequad <- function(X, Y, ..., eps=NULL, nd=1000, random=FALSE) { epsgiven <- !is.null(eps) if(is.lpp(X)) { # extract local coordinates from lpp object coo <- coords(X) mapXY <- coo$seg tp <- coo$tp Xproj <- as.ppp(X) if(!missing(Y) && !is.null(Y)) warning("Argument Y ignored when X is an lpp object") Y <- as.psp(X) } else if(is.ppp(X)) { # project data points onto segments stopifnot(is.psp(Y)) v <- project2segment(X, Y) Xproj <- v$Xproj mapXY <- v$mapXY tp <- v$tp } else stop("X should be an object of class lpp or ppp") # handle multitype ismulti <- is.multitype(X) if(is.marked(X) && !ismulti) stop("Not implemented for marked patterns") if(ismulti) { marx <- marks(X) flev <- factor(levels(marx)) } # win <- as.owin(Y) len <- 
lengths_psp(Y) nseg <- length(len) if(is.null(eps)) { stopifnot(is.numeric(nd) && length(nd) == 1L & is.finite(nd) && nd > 0) eps <- sum(len)/nd } else stopifnot(is.numeric(eps) && length(eps) == 1L && is.finite(eps) && eps > 0) ## if(is.lpp(X) && spatstat.options('Clinequad')) { L <- as.linnet(X) W <- Frame(L) V <- vertices(L) nV <- npoints(V) coordsV <- coords(V) coordsX <- coords(X) nX <- npoints(X) ooX <- order(coordsX$seg) ndumeach <- ceiling(len/eps) + 1L ndummax <- sum(ndumeach) maxdataperseg <- max(table(factor(coordsX$seg, levels=1:nsegments(L)))) maxscratch <- max(ndumeach) + maxdataperseg if(!ismulti) { if(!random) { z <- .C(SL_Clinequad, ns = as.integer(nseg), from = as.integer(L$from-1L), to = as.integer(L$to-1L), nv = as.integer(nV), xv = as.double(coordsV$x), yv = as.double(coordsV$y), eps = as.double(eps), ndat = as.integer(nX), sdat = as.integer(coordsX$seg[ooX]-1L), tdat = as.double(coordsX$tp[ooX]), wdat = as.double(numeric(nX)), ndum = as.integer(integer(1L)), xdum = as.double(numeric(ndummax)), ydum = as.double(numeric(ndummax)), sdum = as.integer(integer(ndummax)), tdum = as.double(numeric(ndummax)), wdum = as.double(numeric(ndummax)), maxscratch = as.integer(maxscratch), PACKAGE="spatstat.linnet") } else { z <- .C(SL_ClineRquad, ns = as.integer(nseg), from = as.integer(L$from-1L), to = as.integer(L$to-1L), nv = as.integer(nV), xv = as.double(coordsV$x), yv = as.double(coordsV$y), eps = as.double(eps), ndat = as.integer(nX), sdat = as.integer(coordsX$seg[ooX]-1L), tdat = as.double(coordsX$tp[ooX]), wdat = as.double(numeric(nX)), ndum = as.integer(integer(1L)), xdum = as.double(numeric(ndummax)), ydum = as.double(numeric(ndummax)), sdum = as.integer(integer(ndummax)), tdum = as.double(numeric(ndummax)), wdum = as.double(numeric(ndummax)), maxscratch = as.integer(maxscratch), PACKAGE="spatstat.linnet") } seqdum <- seq_len(z$ndum) dum <- with(z, ppp(xdum[seqdum], ydum[seqdum], window=W, check=FALSE)) wdum <- z$wdum[seqdum] wdat <- numeric(nX) wdat[ooX] <- z$wdat dat <- as.ppp(X) } else { ntypes <- length(flev) ndummax <- ntypes * (ndummax + nX) maxscratch <- ntypes * maxscratch if(!random) { z <- .C(SL_ClineMquad, ns = as.integer(nseg), from = as.integer(L$from-1L), to = as.integer(L$to-1L), nv = as.integer(nV), xv = as.double(coordsV$x), yv = as.double(coordsV$y), eps = as.double(eps), ntypes = as.integer(ntypes), ndat = as.integer(nX), xdat = as.double(coordsX$x), ydat = as.double(coordsX$y), mdat = as.integer(as.integer(marx)-1L), sdat = as.integer(coordsX$seg[ooX]-1L), tdat = as.double(coordsX$tp[ooX]), wdat = as.double(numeric(nX)), ndum = as.integer(integer(1L)), xdum = as.double(numeric(ndummax)), ydum = as.double(numeric(ndummax)), mdum = as.integer(integer(ndummax)), sdum = as.integer(integer(ndummax)), tdum = as.double(numeric(ndummax)), wdum = as.double(numeric(ndummax)), maxscratch = as.integer(maxscratch), PACKAGE="spatstat.linnet") } else { z <- .C(SL_ClineRMquad, ns = as.integer(nseg), from = as.integer(L$from-1L), to = as.integer(L$to-1L), nv = as.integer(nV), xv = as.double(coordsV$x), yv = as.double(coordsV$y), eps = as.double(eps), ntypes = as.integer(ntypes), ndat = as.integer(nX), xdat = as.double(coordsX$x), ydat = as.double(coordsX$y), mdat = as.integer(as.integer(marx)-1L), sdat = as.integer(coordsX$seg[ooX]-1L), tdat = as.double(coordsX$tp[ooX]), wdat = as.double(numeric(nX)), ndum = as.integer(integer(1L)), xdum = as.double(numeric(ndummax)), ydum = as.double(numeric(ndummax)), mdum = as.integer(integer(ndummax)), sdum = 
as.integer(integer(ndummax)), tdum = as.double(numeric(ndummax)), wdum = as.double(numeric(ndummax)), maxscratch = as.integer(maxscratch), PACKAGE="spatstat.linnet") } seqdum <- seq_len(z$ndum) marques <- factor(z$mdum[seqdum] + 1L, labels=flev) dum <- with(z, ppp(xdum[seqdum], ydum[seqdum], marks=marques, window=W, check=FALSE)) wdum <- z$wdum[seqdum] wdat <- numeric(nX) wdat[ooX] <- z$wdat dat <- as.ppp(X) } } else { ## older, interpreted code ## initialise quad scheme dat <- dum <- ppp(numeric(0), numeric(0), window=win) wdat <- wdum <- numeric(0) if(ismulti) marks(dat) <- marks(dum) <- marx[integer(0)] ## consider each segment in turn YY <- as.data.frame(Y) for(i in 1:nseg) { ## divide segment into pieces of length eps ## with shorter bits at each end leni <- len[i] nwhole <- floor(leni/eps) if(leni/eps - nwhole < 0.5 && nwhole > 2) nwhole <- nwhole - 1 rump <- (leni - nwhole * eps)/2 brks <- c(0, rump + (0:nwhole) * eps, leni) nbrks <- length(brks) ## dummy points at middle of each piece sdum <- (brks[-1L] + brks[-nbrks])/2 x <- with(YY, x0[i] + (sdum/leni) * (x1[i]-x0[i])) y <- with(YY, y0[i] + (sdum/leni) * (y1[i]-y0[i])) newdum <- list(x=x, y=y) ndum <- length(sdum) IDdum <- 1:ndum ## relevant data points relevant <- (mapXY == i) newdat <- Xproj[relevant] sdat <- leni * tp[relevant] IDdat <- findInterval(sdat, brks, rightmost.closed=TRUE, all.inside=TRUE) ## determine weights w <- countingweights(id=c(IDdum, IDdat), areas=diff(brks)) wnewdum <- w[1:ndum] wnewdat <- w[-(1:ndum)] ## if(!ismulti) { ## unmarked pattern dat <- superimpose(dat, newdat, W=win, check=FALSE) dum <- superimpose(dum, newdum, W=win, check=FALSE) wdat <- c(wdat, wnewdat) wdum <- c(wdum, wnewdum) } else { ## marked point pattern ## attach correct marks to data points marks(newdat) <- marx[relevant] dat <- superimpose(dat, newdat, W=win, check=FALSE) wdat <- c(wdat, wnewdat) newdum <- as.ppp(newdum, W=win, check=FALSE) ## replicate dummy points with each mark ## also add points at data locations with other marks for(k in seq_len(length(flev))) { le <- flev[k] avoid <- (marks(newdat) != le) dum <- superimpose(dum, newdum %mark% le, newdat[avoid] %mark% le, W=win, check=FALSE) wdum <- c(wdum, wnewdum, wnewdat[avoid]) } } } } ## save parameters dmethod <- paste("Equally spaced along each segment at spacing eps =", signif(eps, 4), summary(unitname(X))$plural) if(!epsgiven) dmethod <- paste0(dmethod, "\nOriginal parameter nd = ", nd) wmethod <- "Counting weights based on segment length" param <- list(dummy = list(method=dmethod), weight = list(method=wmethod)) ## make quad scheme Qout <- quad(dat, dum, c(wdat, wdum), param=param) ## silently attach lines attr(Qout, "lines") <- Y return(Qout) } spatstat.linnet/R/unstacklpp.R0000644000176200001440000000055014144334010016107 0ustar liggesusers#' #' unstacklpp.R #' #' $Revision: 1.1 $ $Date: 2020/06/17 04:38:38 $ unstack.lpp <- function(x, ...) { trap.extra.arguments(...) marx <- marks(x) d <- dim(marx) if(is.null(d)) return(solist(x)) y <- rep(list(unmark(x)), d[2]) for(j in seq_along(y)) marks(y[[j]]) <- marx[,j,drop=FALSE] names(y) <- colnames(marx) return(as.solist(y)) } spatstat.linnet/R/linnetsurgery.R0000644000176200001440000003312014144334007016642 0ustar liggesusers#' #' linnetsurgery.R #' #' Surgery on linear networks and related objects #' #' $Revision: 1.31 $ $Date: 2020/04/01 04:44:15 $ #' insertVertices <- function(L, ...) 
{ if(!inherits(L, c("lpp", "linnet"))) stop("L should be a linear network (linnet) or point pattern (lpp)", call.=FALSE) if(haspoints <- is.lpp(L)) { X <- L L <- as.linnet(L) cooXnew <- cooXold <- coords(X) segXold <- cooXold$seg tpXold <- cooXold$tp } ## validate new vertices V <- as.lpp(..., L=L) if(!identical(as.linnet(L, sparse=TRUE), as.linnet(V, sparse=TRUE))) stop("New vertices must lie on exactly the same network as L") if(npoints(V) == 0) { attr(L, "id") <- integer(0) if(!haspoints) { return(L) } else { X$domain <- L return(X) } } ## extract new vertex coordinates co <- coords(V) seg <- co$seg tp <- co$tp ## determine which segments will be split splitsegments <- sortunique(seg) notsplit <- rep(TRUE, nsegments(L)) notsplit[splitsegments] <- FALSE ## un-split segments will be retained and listed first, ## followed by the new pieces. ## Compute new serial numbers for the un-split segments. segmap <- cumsum(notsplit) nunsplit <- sum(notsplit) ## existing vertices v <- L$vertices n <- npoints(v) ## initialise lists of new vertices and edges nadd <- 0 vadd <- list(x=numeric(0), y=numeric(0)) fromadd <- toadd <- id <- integer(0) ## initialise mapping from output segments to input segments comefrom <- which(notsplit) ## split segments containing new vertices for(theseg in splitsegments) { ## find new vertices lying on segment 'theseg' those <- (seg == theseg) idthose <- which(those) ## order the new vertices along this segment tt <- tp[those] oo <- order(tt) tt <- tt[oo] idadd <- idthose[oo] ## make new vertices i <- L$from[theseg] j <- L$to[theseg] xnew <- with(v, x[i] + tt * diff(x[c(i,j)])) ynew <- with(v, y[i] + tt * diff(y[c(i,j)])) vnew <- list(x=xnew, y=ynew) nnew <- length(tt) ## replace edge i ~ j with edges i ~ k and k ~ j kk <- n + nadd + seq_len(nnew) fromnew <- c(i, kk) tonew <- c(kk, j) nnewseg <- nnew + 1L ## add new vertices and edges to running total nadd <- nadd + nnew vadd <- concatxy(vadd, vnew) fromadd <- c(fromadd, fromnew) toadd <- c(toadd, tonew) id <- c(id, idadd) comefrom <- c(comefrom, rep(theseg, nnewseg)) ## handle data points if any if(haspoints && any(relevant <- (segXold == theseg))) { tx <- tpXold[relevant] ttt <- c(0, tt, 1) m <- findInterval(tx, ttt, rightmost.closed=TRUE, all.inside=TRUE) t0 <- ttt[m] t1 <- ttt[m+1L] tpXnew <- (tx - t0)/(t1-t0) tpXnew <- pmin(1, pmax(0, tpXnew)) n0 <- nunsplit + length(fromadd) - nnewseg segXnew <- n0 + m cooXnew$seg[relevant] <- segXnew cooXnew$tp[relevant] <- tpXnew } } newfrom <- c(L$from[-splitsegments], fromadd) newto <- c(L$to[-splitsegments], toadd) edges <- cbind(newfrom, newto) reverse <- (newfrom > newto) if(anyrev <- any(reverse)) edges[reverse, ] <- edges[reverse, c(2,1)] newv <- superimpose(v, vadd, check=FALSE) Lnew <- linnet(newv, edges=edges, sparse=identical(L$sparse, TRUE), warn=FALSE) #' save information on provenance of line segments S <- Lnew$lines attr(S, "comefrom") <- comefrom if(length(comefrom) != nsegments(S)) stop(paste("Internal error: length of provenance vector =", length(comefrom), "!=", nsegments(S), "= number of segments"), call.=FALSE) #' copy marks if(!is.null(marx <- marks(L$lines))) marks(S) <- as.data.frame(marx)[comefrom, , drop=FALSE] Lnew$lines <- S #' save information identifying the new vertices in the new network newid <- integer(nadd) newid[id] <- n + 1:nadd attr(Lnew, "id") <- newid if(!haspoints) return(Lnew) ## adjust segment id for data points on segments that were not split Xnotsplit <- notsplit[segXold] cooXnew$seg[Xnotsplit] <- segmap[segXold[Xnotsplit]] ## adjust 
local coordinates if segment was reversed if(anyrev) { reversing <- reverse[cooXnew$seg] cooXnew$tp[reversing] <- 1 - cooXnew$tp[reversing] } Xnew <- lpp(cooXnew, Lnew) marks(Xnew) <- marks(X) attr(Xnew, "id") <- newid return(Xnew) } joinVertices <- function(L, from, to, marks=NULL) { if(!inherits(L, c("lpp", "linnet"))) stop("L should be a linear network (linnet) or point pattern (lpp)", call.=FALSE) if(haspoints <- is.lpp(L)) { X <- L L <- as.linnet(L) Xdf <- as.data.frame(X) } if((missing(to) || is.null(to)) && !is.null(dim(from)) && ncol(from) == 2) { to <- from[,2] from <- from[,1] } newfrom <- as.integer(from) newto <- as.integer(to) edges <- cbind(c(L$from, newfrom), c(L$to, newto)) Lnew <- linnet(vertices(L), edges=edges, sparse=L$sparse, warn=FALSE) #' assign marks only if provided if(!is.null(marks)) { marxnew <- (marks(L$lines) %mapp% marks) %msub% !duplicated(edges) if(!is.null(marxnew)) marks(Lnew$lines) <- marxnew } if(!is.null(L$toler)) Lnew$toler <- L$toler if(!haspoints) return(Lnew) X <- lpp(Xdf, Lnew) return(X) } repairNetwork <- function(X) { if(!inherits(X, c("linnet", "lpp"))) stop("X should be a linnet or lpp object", call.=FALSE) L <- as.linnet(X) V <- vertices(L) S <- L$lines from <- L$from to <- L$to ## check consistency between 'from,to' and 'lines' Sfrom <- endpoints.psp(S, "first") Sto <- endpoints.psp(S, "second") fromV <- nncross(Sfrom, V, what="which") toV <- nncross(Sto, V, what="which") problem <- NULL if(length(from) != nsegments(S)) { problem <- "Network data implied different numbers of edges" } else if(any(to != toV) || any(from != fromV)) { #' find genuinely different vertices tol <- L$toler %orifnull% default.linnet.tolerance(S) clash <- (from != fromV) ss <- Sfrom[clash] vv <- V[from[clash]] d <- sqrt((ss$x - vv$x)^2 + (ss$y - vv$y)^2) bad <- (max(d) > tol) if(!bad) { clash <- (to != toV) ss <- Sto[clash] vv <- V[to[clash]] d <- sqrt((ss$x - vv$x)^2 + (ss$y - vv$y)^2) bad <- (max(d) > tol) } if(bad) problem <- "Edge indices did not agree with segment endpoints" } if(!is.null(problem)) { if(is.marked(S)) { solution <- "edge indices were recomputed from line geometry" from <- L$from <- fromV to <- L$to <- toV } else { solution <- "lines were rebuilt from edge indices" xx <- V$x yy <- V$y L$lines <- psp(xx[from], yy[from], xx[to], yy[to], window=Window(V), check=FALSE) } warning(paste0(problem, "; ", solution), call.=FALSE) } reverse <- (from > to) if(any(reverse)) { newfrom <- ifelse(reverse, to, from) newto <- ifelse(reverse, from, to) from <- L$from <- newfrom to <- L$to <- newto L$lines$ends[reverse,] <- L$lines$ends[reverse, c(3,4,1,2)] if(is.lpp(X)) { X$domain <- L } else { X <- L } } edgepairs <- cbind(from, to) retainedges <- !duplicated(as.data.frame(edgepairs)) & (from != to) keepall <- all(retainedges) if(is.lpp(X) && (!keepall || any(reverse))) { #' adjust segment coordinates cooX <- coords(X) # hyperframe, may include marks oldseg <- as.integer(unlist(cooX$seg)) oldtp <- as.numeric(unlist(cooX$tp)) if(keepall) { newseg <- oldseg } else { segmap <- uniquemap(as.data.frame(edgepairs)) newseg <- segmap[oldseg] } newtp <- ifelse(reverse[oldseg], 1 - oldtp, oldtp) cooX$seg <- newseg cooX$tp <- newtp coords(X) <- cooX } if(keepall) return(X) Y <- thinNetwork(X, retainedges=retainedges) return(Y) } thinNetwork <- function(X, retainvertices, retainedges) { ## thin a network by retaining only the specified edges and/or vertices if(!inherits(X, c("linnet", "lpp"))) stop("X should be a linnet or lpp object", call.=FALSE) gotvert <- 
!missing(retainvertices) gotedge <- !missing(retainedges) if(!gotedge && !gotvert) return(X) L <- as.linnet(X) from <- L$from to <- L$to V <- L$vertices sparse <- identical(L$sparse, TRUE) #' determine which edges/vertices are to be retained edgesFALSE <- logical(nsegments(L)) verticesFALSE <- logical(npoints(V)) if(!gotedge) { retainedges <- edgesFALSE } else if(!is.logical(retainedges)) { z <- edgesFALSE z[retainedges] <- TRUE retainedges <- z } if(!gotvert) { retainvertices <- verticesFALSE } else if(!is.logical(retainvertices)) { z <- verticesFALSE z[retainvertices] <- TRUE retainvertices <- z } if(gotvert && !gotedge) { ## retain all edges between retained vertices retainedges <- retainvertices[from] & retainvertices[to] } else if(gotedge) { ## retain vertices required for the retained edges retainvertices[from[retainedges]] <- TRUE retainvertices[to[retainedges]] <- TRUE } ## assign new serial numbers to vertices, and recode Vsub <- V[retainvertices] newserial <- cumsum(retainvertices) newfrom <- newserial[from[retainedges]] newto <- newserial[to[retainedges]] ## remove duplicate segments reverse <- (newfrom > newto) edgepairs <- cbind(ifelse(reverse, newto, newfrom), ifelse(reverse, newfrom, newto)) nontrivial <- (newfrom != newto) & !duplicated(edgepairs) edgepairs <- edgepairs[nontrivial,,drop=FALSE] reverse <- reverse[nontrivial] ## extract relevant subset of network Lsub <- linnet(Vsub, edges=edgepairs, sparse=sparse, warn=FALSE) ## tack on information about subset attr(Lsub, "retainvertices") <- retainvertices attr(Lsub, "retainedges") <- retainedges ## done? if(inherits(X, "linnet")) return(Lsub) ## X is an lpp object ## Find data points that lie on accepted segments dat <- X$data # hyperframe, may include marks ok <- retainedges[unlist(dat$seg)] dsub <- dat[ok, , drop=FALSE] ## compute new serial numbers for retained segments segmap <- cumsum(retainedges) oldseg <- as.integer(unlist(dsub$seg)) dsub$seg <- newseg <- segmap[oldseg] ## adjust tp coordinate if segment endpoints were reversed if(any(revseg <- reverse[newseg])) { tp <- as.numeric(unlist(dsub$tp)) dsub$tp[revseg] <- 1 - tp[revseg] } # make new lpp object Y <- ppx(data=dsub, domain=Lsub, coord.type=as.character(X$ctype)) class(Y) <- c("lpp", class(Y)) ## tack on information about subset attr(Y, "retainpoints") <- ok return(Y) } validate.lpp.coords <- function(X, fatal=TRUE, context="") { ## check for mangled internal data proj <- project2segment(as.ppp(X), as.psp(as.linnet(X))) seg.claimed <- coords(X)$seg seg.mapped <- proj$mapXY if(any(seg.claimed != seg.mapped)) { whinge <- paste("Incorrect segment id", context) if(fatal) stop(whinge, call.=FALSE) else warning(whinge, call.=FALSE) return(FALSE) } tp.claimed <- coords(X)$tp tp.mapped <- proj$tp v <- max(abs(tp.claimed - tp.mapped)) if(v > 0.01) { whinge <- paste("Incorrect 'tp' coordinate", paren(paste("max discrepancy", v)), context) if(fatal) stop(whinge, call.=FALSE) else warning(whinge, call.=FALSE) return(FALSE) } return(TRUE) } addVertices <- function(L, X, join=NULL, joinmarks=NULL) { if(!inherits(L, c("lpp", "linnet"))) stop("L should be a linear network (linnet) or point pattern (lpp)", call.=FALSE) X <- as.ppp(X) if(haspoints <- is.lpp(L)) { Y <- L L <- as.linnet(L) } sparse <- L$sparse || is.null(L$dpath) V <- vertices(L) nV <- npoints(V) from <- L$from to <- L$to ## new vertices nX <- npoints(X) Vplus <- superimpose(V, X, check=FALSE) nplus <- npoints(Vplus) iold <- seq_len(nV) inew <- nV + seq_len(nX) ## make new network Lplus <- L Lplus$vertices <- 
Vplus Lplus$window <- Window(Vplus) Lplus$sparse <- sparse ## 'lines', 'from', 'to', 'toler' are unchanged mplus <- sparseMatrix(i=c(from, to), j=c(to,from), x=TRUE, dims=c(nplus, nplus)) if(!sparse) mplus <- as.matrix(mplus) Lplus$m <- mplus if(!sparse) { dold <- L$dpath dnew <- matrix(Inf, nplus, nplus) diag(dnew) <- 0 dnew[iold, iold] <- dold Lplus$dpath <- dnew } if(haspoints) { Y$domain <- Lplus # sufficient; point coordinates are still valid } out <- if(haspoints) Y else Lplus ## optionally join new vertices to existing network if(!is.null(join)) { if(is.numeric(join)) { check.nvector(join, nX, things="points of X") out <- joinVertices(out, inew, join, marks=joinmarks) } else if(is.character(join)) { join <- match.arg(join, c("vertices", "nearest")) switch(join, vertices={ join <- nncross(X, V, what="which") out <- joinVertices(out, inew, join, marks=joinmarks) }, nearest ={ #' find nearest points on L p <- project2segment(X, as.psp(Lplus)) locX <- data.frame(seg=p$mapXY, tp=p$tp) #' make them vertices out <- insertVertices(out, locX) #' join X to these new vertices joinid <- attr(out, "id") out <- joinVertices(out, inew, joinid, marks=joinmarks) }) } else if(is.lpp(join)) { stopifnot(npoints(join) == npoints(X)) out <- insertVertices(out, join) joinid <- attr(out, "id") out <- joinVertices(out, inew, joinid, marks=joinmarks) } else stop("Format of 'join' is not understood", call.=FALSE) } attr(out, "id") <- inew return(out) } spatstat.linnet/R/lpp.R0000644000176200001440000005005214144334007014526 0ustar liggesusers# # lpp.R # # $Revision: 1.71 $ $Date: 2020/12/19 05:25:06 $ # # Class "lpp" of point patterns on linear networks lpp <- function(X, L, ...) { stopifnot(inherits(L, "linnet")) if(missing(X) || is.null(X)) { ## empty pattern df <- data.frame(x=numeric(0), y=numeric(0)) lo <- data.frame(seg=integer(0), tp=numeric(0)) } else { localnames <- c("seg", "tp") spatialnames <- c("x", "y") allcoordnames <- c(spatialnames, localnames) if(is.matrix(X)) X <- as.data.frame(X) if(checkfields(X, localnames)) { #' X includes at least local coordinates X <- as.data.frame(X) #' validate local coordinates if(nrow(X) > 0) { nedge <- nsegments(L) if(with(X, any(seg < 1 | seg > nedge))) stop("Segment index coordinate 'seg' exceeds bounds") if(with(X, any(tp < 0 | tp > 1))) stop("Local coordinate 'tp' outside [0,1]") } if(!checkfields(X, spatialnames)) { #' data give local coordinates only #' reconstruct x,y coordinates from local coordinates Y <- local2lpp(L, X$seg, X$tp, df.only=TRUE) X[,spatialnames] <- Y[,spatialnames,drop=FALSE] } #' local coordinates lo <- X[ , localnames, drop=FALSE] #' spatial coords and marks marknames <- setdiff(names(X), allcoordnames) df <- X[, c(spatialnames, marknames), drop=FALSE] } else { #' local coordinates must be computed from spatial coordinates if(!is.ppp(X)) X <- as.ppp(X, W=L$window, ...) #' project to segment pro <- project2segment(X, as.psp(L)) #' projected points (spatial coordinates and marks) df <- as.data.frame(pro$Xproj) #' local coordinates lo <- data.frame(seg=pro$mapXY, tp=pro$tp) } } # combine spatial, local, marks nmark <- ncol(df) - 2 if(nmark == 0) { df <- cbind(df, lo) ctype <- c(rep("s", 2), rep("l", 2)) } else { df <- cbind(df[,1:2], lo, df[, -(1:2), drop=FALSE]) ctype <- c(rep("s", 2), rep("l", 2), rep("m", nmark)) } out <- ppx(data=df, domain=L, coord.type=ctype) class(out) <- c("lpp", class(out)) return(out) } print.lpp <- function(x, ...) 
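#' print.lpp: textual display of an lpp object, such as one built by the
#' constructor above.  A minimal construction sketch, assuming the
#' 'simplenet' network from spatstat.data:
#'   X <- lpp(data.frame(seg=c(1,3), tp=c(0.2,0.7)), simplenet)
#'   X    # auto-prints via print.lpp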
{ stopifnot(inherits(x, "lpp")) splat("Point pattern on linear network") sd <- summary(x$data) np <- sd$ncases nama <- sd$col.names splat(np, ngettext(np, "point", "points")) ## check for unusual coordinates ctype <- x$ctype nam.m <- nama[ctype == "mark"] nam.t <- nama[ctype == "temporal"] nam.c <- setdiff(nama[ctype == "spatial"], c("x","y")) nam.l <- setdiff(nama[ctype == "local"], c("seg", "tp")) if(length(nam.c) > 0) splat("Additional spatial coordinates", commasep(sQuote(nam.c))) if(length(nam.l) > 0) splat("Additional local coordinates", commasep(sQuote(nam.l))) if(length(nam.t) > 0) splat("Additional temporal coordinates", commasep(sQuote(nam.t))) if((nmarks <- length(nam.m)) > 0) { if(nmarks > 1) { splat(nmarks, "columns of marks:", commasep(sQuote(nam.m))) } else { marx <- marks(x) if(is.factor(marx)) { exhibitStringList("Multitype, with possible types:", levels(marx)) } else splat("Marks of type", sQuote(typeof(marx))) } } print(x$domain, ...) return(invisible(NULL)) } plot.lpp <- function(x, ..., main, add=FALSE, use.marks=TRUE, which.marks=NULL, show.all=!add, show.window=FALSE, show.network=TRUE, do.plot=TRUE, multiplot=TRUE) { if(missing(main)) main <- short.deparse(substitute(x)) ## Handle multiple columns of marks as separate plots ## (unless add=TRUE or which.marks selects a single column ## or multiplot = FALSE) mx <- marks(x) if(use.marks && !is.null(dim(mx))) { implied.all <- is.null(which.marks) want.several <- implied.all || !is.null(dim(mx <- mx[,which.marks,drop=TRUE])) do.several <- want.several && !add && multiplot if(want.several) mx <- as.data.frame(mx) #' ditch hyperframe columns if(do.several) { ## generate one plot for each column of marks y <- solapply(mx, setmarks, x=x) out <- do.call(plot, c(list(x=y, main=main, do.plot=do.plot, show.window=show.window), list(...))) return(invisible(out)) } if(is.null(which.marks)) { which.marks <- 1 if(do.plot) message("Plotting the first column of marks") } } ## single plot ## determine space required, including legend P <- as.ppp(x) a <- plot(P, ..., do.plot=FALSE, use.marks=use.marks, which.marks=which.marks) if(!do.plot) return(a) ## initialise graphics space if(!add) { if(show.window) { plot(Window(P), main=main, invert=TRUE, ...) } else { b <- attr(a, "bbox") plot(b, type="n", main=main, ..., show.all=FALSE) } } ## plot linear network if(show.network) { L <- as.linnet(x) dont.complain.about(L) do.call.matched(plot.linnet, resolve.defaults(list(x=quote(L), add=TRUE), list(...)), extrargs=c("lty", "lwd", "col")) } ## plot points, legend, title ans <- do.call.matched(plot.ppp, c(list(x=P, add=TRUE, main=main, use.marks=use.marks, which.marks=which.marks, show.all=show.all, show.window=FALSE), list(...)), extrargs=c("shape", "size", "pch", "cex", "fg", "bg", "cols", "lty", "lwd", "etch", "cex.main", "col.main", "line", "outer", "sub")) return(invisible(ans)) } summary.lpp <- function(object, ...) 
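#' summary.lpp: reports the number of points, the total network length and
#' the average intensity npoints/totlength; for example, 20 points on a
#' network of total length 5 (in the units of the pattern) give an average
#' intensity of 4 points per unit length.  Multitype patterns additionally
#' get a per-type table of frequency, proportion and intensity.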
{ stopifnot(inherits(object, "lpp")) L <- object$domain result <- summary(L) np <- npoints(object) result$npoints <- np <- npoints(object) result$intensity <- np/result$totlength result$is.marked <- is.marked(object) result$is.multitype <- is.multitype(object) mks <- marks(object) result$markformat <- mkf <- markformat(mks) switch(mkf, none = { result$multiple.marks <- FALSE }, vector = { result$multiple.marks <- FALSE if(result$is.multitype) { tm <- as.vector(table(mks)) tfp <- data.frame(frequency=tm, proportion=tm/sum(tm), intensity=tm/result$totlength, row.names=levels(mks)) result$marks <- tfp result$is.numeric <- FALSE } else { result$marks <- summary(mks) result$is.numeric <- is.numeric(mks) } result$marknames <- "marks" result$marktype <- typeof(mks) }, dataframe = , hyperframe = { result$multiple.marks <- TRUE result$marknames <- names(mks) result$is.numeric <- FALSE result$marktype <- mkf result$is.multitype <- FALSE result$marks <- summary(mks) }) class(result) <- "summary.lpp" return(result) } print.summary.lpp <- function(x, ...) { what <- if(x$is.multitype) "Multitype point pattern" else if(x$is.marked) "Marked point pattern" else "Point pattern" splat(what, "on linear network") splat(x$npoints, "points") splat("Linear network with", x$nvert, "vertices and", x$nline, "lines") u <- x$unitinfo dig <- getOption('digits') splat("Total length", signif(x$totlength, dig), u$plural, u$explain) splat("Average intensity", signif(x$intensity, dig), "points per", if(u$vanilla) "unit length" else u$singular) if(x$is.marked) { if(x$multiple.marks) { splat("Mark variables:", commasep(x$marknames, ", ")) cat("Summary of marks:\n") print(x$marks) } else if(x$is.multitype) { cat("Types of points:\n") print(signif(x$marks,dig)) } else { splat("marks are ", if(x$is.numeric) "numeric, ", "of type ", sQuote(x$marktype), sep="") cat("Summary:\n") print(x$marks) } } else splat("Unmarked") print(x$win, prefix="Enclosing window: ") invisible(NULL) } intensity.lpp <- function(X, ...) { len <- sum(lengths_psp(as.psp(as.linnet(X)))) if(is.multitype(X)) table(marks(X))/len else npoints(X)/len } is.lpp <- function(x) { inherits(x, "lpp") } is.multitype.lpp <- function(X, na.action="warn", ...) 
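#' is.multitype.lpp: TRUE only when the marks form a single factor
#' (data-frame or hyperframe marks with several columns give FALSE).
#' A minimal sketch, assuming the 'simplenet' network from spatstat.data:
#'   X <- runiflpp(10, simplenet) %mark% factor(rep(c("a", "b"), 5))
#'   is.multitype(X)   # TRUE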
{ marx <- marks(X) if(is.null(marx)) return(FALSE) if((is.data.frame(marx) || is.hyperframe(marx)) && ncol(marx) > 1) return(FALSE) if(!is.factor(marx)) return(FALSE) if((length(marx) > 0) && anyNA(marx)) switch(na.action, warn = { warning(paste("some mark values are NA in the point pattern", short.deparse(substitute(X)))) }, fatal = { return(FALSE) }, ignore = {} ) return(TRUE) } as.lpp <- function(x=NULL, y=NULL, seg=NULL, tp=NULL, ..., marks=NULL, L=NULL, check=FALSE, sparse) { nomore <- is.null(y) && is.null(seg) && is.null(tp) if(inherits(x, "lpp") && nomore) { X <- x if(!missing(sparse) && !is.null(sparse)) X$domain <- as.linnet(domain(X), sparse=sparse) } else { if(!inherits(L, "linnet")) stop("L should be a linear network") if(!missing(sparse) && !is.null(sparse)) L <- as.linnet(L, sparse=sparse) if(is.ppp(x) && nomore) { X <- lpp(x, L) } else if(is.data.frame(x) && nomore) { X <- do.call(as.lpp, resolve.defaults(as.list(x), list(...), list(marks=marks, L=L, check=check))) } else if(is.null(x) && is.null(y) && !is.null(seg) && !is.null(tp)){ X <- lpp(data.frame(seg=seg, tp=tp), L=L) } else { if(is.numeric(x) && length(x) == 2 && is.null(y)) { xy <- list(x=x[1L], y=x[2L]) } else { xy <- xy.coords(x,y)[c("x", "y")] } if(!is.null(seg) && !is.null(tp)) { # add segment map information xy <- append(xy, list(seg=seg, tp=tp)) } else { # convert to ppp, typically suppressing check mechanism xy <- as.ppp(xy, W=as.owin(L), check=check) } X <- lpp(xy, L) } } if(!is.null(marks)) marks(X) <- marks return(X) } as.ppp.lpp <- function(X, ..., fatal=TRUE) { verifyclass(X, "lpp", fatal=fatal) L <- X$domain Y <- as.ppp(coords(X, temporal=FALSE, local=FALSE), W=L$window, check=FALSE) if(!is.null(marx <- marks(X))) { if(is.hyperframe(marx)) marx <- as.data.frame(marx) marks(Y) <- marx } return(Y) } Window.lpp <- function(X, ...) { as.owin(X) } "Window<-.lpp" <- function(X, ..., check=TRUE, value) { if(check) { X <- X[value] } else { Window(X$domain, check=FALSE) <- value } return(X) } as.owin.lpp <- function(W, ..., fatal=TRUE) { as.owin(as.ppp(W, ..., fatal=fatal)) } domain.lpp <- function(X, ...) 
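#' domain.lpp: the domain of a point pattern on a network is the network
#' itself, so this simply calls as.linnet().  Related coercions defined
#' above: as.ppp() keeps the points (and marks) but discards the network
#' structure, while as.owin()/Window() give the enclosing planar window.
#' Illustrative sketch, assuming an lpp object 'X':
#'   domain(X)    # the linnet
#'   as.ppp(X)    # plain planar point pattern
#'   Window(X)    # enclosing owin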
{ as.linnet(X) } as.linnet.lpp <- function(X, ..., fatal=TRUE, sparse) { verifyclass(X, "lpp", fatal=fatal) L <- X$domain if(!missing(sparse)) L <- as.linnet(L, sparse=sparse) return(L) } unitname.lpp <- function(x) { u <- unitname(x$domain) return(u) } "unitname<-.lpp" <- function(x, value) { w <- x$domain unitname(w) <- value x$domain <- w return(x) } "marks<-.lpp" <- function(x, ..., value) { NextMethod("marks<-") } unmark.lpp <- function(X) { NextMethod("unmark") } as.psp.lpp <- function(x, ..., fatal=TRUE){ verifyclass(x, "lpp", fatal=fatal) return(x$domain$lines) } nsegments.lpp <- function(x) { return(x$domain$lines$n) } local2lpp <- function(L, seg, tp, X=NULL, df.only=FALSE) { stopifnot(inherits(L, "linnet")) if(is.null(X)) { # map to (x,y) Ldf <- as.data.frame(L$lines) dx <- with(Ldf, x1-x0) dy <- with(Ldf, y1-y0) x <- with(Ldf, x0[seg] + tp * dx[seg]) y <- with(Ldf, y0[seg] + tp * dy[seg]) } else { x <- X$x y <- X$y } # compile into data frame data <- data.frame(x=x, y=y, seg=seg, tp=tp) if(df.only) return(data) ctype <- c("s", "s", "l", "l") out <- ppx(data=data, domain=L, coord.type=ctype) class(out) <- c("lpp", class(out)) return(out) } #################################################### # subset extractor #################################################### "[.lpp" <- function (x, i, j, drop=FALSE, ..., snip=TRUE) { if(!missing(i) && !is.null(i)) { if(is.owin(i)) { # spatial domain: call code for 'j' xi <- x[,i,snip=snip] } else { # usual row-type index da <- x$data daij <- da[i, , drop=FALSE] xi <- ppx(data=daij, domain=x$domain, coord.type=as.character(x$ctype)) if(drop) xi <- xi[drop=TRUE] # call [.ppx to remove unused factor levels class(xi) <- c("lpp", class(xi)) } x <- xi } if(missing(j) || is.null(j)) return(x) stopifnot(is.owin(j)) x <- repairNetwork(x) w <- j L <- x$domain if(is.vanilla(unitname(w))) unitname(w) <- unitname(x) # Find vertices that lie inside 'w' vertinside <- inside.owin(L$vertices, w=w) from <- L$from to <- L$to if(snip) { ## For efficiency, first restrict network to relevant segments. ## Find segments EITHER OF whose endpoints lie in 'w' okedge <- vertinside[from] | vertinside[to] ## extract relevant subset of network graph x <- thinNetwork(x, retainedges=okedge) ## Now add vertices at crossing points with boundary of 'w' b <- crossing.psp(as.psp(L), edges(w)) x <- insertVertices(x, unique(b)) boundarypoints <- attr(x, "id") ## update data L <- x$domain from <- L$from to <- L$to vertinside <- inside.owin(L$vertices, w=w) vertinside[boundarypoints] <- TRUE } ## find segments whose endpoints BOTH lie in 'w' edgeinside <- vertinside[from] & vertinside[to] ## extract relevant subset of network xnew <- thinNetwork(x, retainedges=edgeinside) ## adjust window without checking Window(xnew, check=FALSE) <- w return(xnew) } #################################################### # affine transformations #################################################### scalardilate.lpp <- function(X, f, ...) { trap.extra.arguments(..., .Context="In scalardilate(X,f)") check.1.real(f, "In scalardilate(X,f)") stopifnot(is.finite(f) && f > 0) Y <- X Y$data$x <- f * as.numeric(X$data$x) Y$data$y <- f * as.numeric(X$data$y) Y$domain <- scalardilate(X$domain, f) return(Y) } affine.lpp <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) { verifyclass(X, "lpp") Y <- X Y$data[, c("x","y")] <- affinexy(X$data[, c("x","y")], mat=mat, vec=vec) Y$domain <- affine(X$domain, mat=mat, vec=vec, ...) 
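#' at this point both the point coordinates (above) and the network itself
#' have been transformed by the same affine map, so the local (seg, tp)
#' coordinates remain valid.  Illustrative call, assuming an lpp object 'X':
#'   affine(X, mat=diag(c(2, 1)))   # stretch x-coordinates by a factor of 2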
return(Y) } shift.lpp <- function(X, vec=c(0,0), ..., origin=NULL) { verifyclass(X, "lpp") Y <- X Y$domain <- if(missing(vec)) { shift(X$domain, ..., origin=origin) } else { shift(X$domain, vec=vec, ..., origin=origin) } vec <- getlastshift(Y$domain) Y$data[, c("x","y")] <- shiftxy(X$data[, c("x","y")], vec=vec) # tack on shift vector attr(Y, "lastshift") <- vec return(Y) } rotate.lpp <- function(X, angle=pi/2, ..., centre=NULL) { verifyclass(X, "lpp") if(!is.null(centre)) { X <- shift(X, origin=centre) negorigin <- getlastshift(X) } else negorigin <- NULL Y <- X Y$data[, c("x","y")] <- rotxy(X$data[, c("x","y")], angle=angle) Y$domain <- rotate(X$domain, angle=angle, ...) if(!is.null(negorigin)) Y <- shift(Y, -negorigin) return(Y) } rescale.lpp <- function(X, s, unitname) { if(missing(unitname)) unitname <- NULL if(missing(s)) s <- 1/unitname(X)$multiplier Y <- scalardilate(X, f=1/s) unitname(Y) <- rescale(unitname(X), s, unitname) return(Y) } superimpose.lpp <- function(..., L=NULL) { objects <- list(...) if(!is.null(L) && !inherits(L, "linnet")) stop("L should be a linear network") if(length(objects) == 0) { if(is.null(L)) return(NULL) emptyX <- lpp(list(x=numeric(0), y=numeric(0)), L) return(emptyX) } islpp <- unlist(lapply(objects, is.lpp)) if(is.null(L) && !any(islpp)) stop("Cannot determine linear network: no lpp objects given") nets <- unique(lapply(objects[islpp], as.linnet)) if(length(nets) > 1) stop("Point patterns are defined on different linear networks") if(!is.null(L)) { nets <- unique(append(nets, list(L))) if(length(nets) > 1) stop("Argument L is a different linear network") } L <- nets[[1L]] ## convert list(x,y) to linear network, etc if(any(!islpp)) objects[!islpp] <- lapply(objects[!islpp], lpp, L=L) ## concatenate coordinates locns <- do.call(rbind, lapply(objects, coords)) ## concatenate marks (or use names of arguments) marx <- superimposeMarks(objects, sapply(objects, npoints)) ## make combined pattern Y <- lpp(locns, L) marks(Y) <- marx return(Y) } identify.lpp <- function(x, ...) { verifyclass(x, "lpp") P <- as.ppp(x) id <- identify(P$x, P$y, ...) if(!is.marked(x)) return(id) marks <- as.data.frame(P)[id, -(1:2)] out <- cbind(data.frame(id=id), marks) row.names(out) <- NULL return(out) } cut.lpp <- function(x, z=marks(x), ...) { if(missing(z) || is.null(z)) { z <- marks(x, dfok=TRUE) if(is.null(z)) stop("no data for grouping: z is missing, and x has no marks") } else { #' special objects if(inherits(z, "linim")) { z <- z[x, drop=FALSE] } else if(inherits(z, "linfun")) { z <- z(x) } else if(inherits(z, "lintess")) { z <- (as.linfun(z))(x) } } if(is.character(z)) { if(length(z) == npoints(x)) { # interpret as a factor z <- factor(z) } else if((length(z) == 1) && (z %in% colnames(df <- as.data.frame(x)))) { # interpret as the name of a column of marks or a coordinate zname <- z z <- df[, zname] if(zname == "seg") z <- factor(z) } else stop("format of argument z not understood") } switch(markformat(z), none = stop("No data for grouping"), vector = { stopifnot(length(z) == npoints(x)) g <- if(is.factor(z)) z else if(is.numeric(z)) cut(z, ...) else factor(z) marks(x) <- g return(x) }, dataframe = , hyperframe = { stopifnot(nrow(z) == npoints(x)) #' extract atomic data z <- as.data.frame(z) if(ncol(z) < 1) stop("No suitable data for grouping") #' take first column of atomic data z <- z[,1L,drop=TRUE] g <- if(is.numeric(z)) cut(z, ...) 
else factor(z) marks(x) <- g return(x) }, list = stop("Don't know how to cut according to a list")) stop("Format of z not understood") } points.lpp <- function(x, ...) { points(coords(x, spatial=TRUE, local=FALSE), ...) } connected.lpp <- function(X, R=Inf, ..., dismantle=TRUE) { if(!dismantle) { if(is.infinite(R)) { Y <- X %mark% factor(1) attr(Y, "retainpoints") <- attr(X, "retainpoints") return(Y) } check.1.real(R) stopifnot(R >= 0) nv <- npoints(X) close <- (pairdist(X) <= R) diag(close) <- FALSE ij <- which(close, arr.ind=TRUE) lab0 <- cocoEngine(nv, ij[,1] - 1L, ij[,2] - 1L, "connected.lpp") lab <- lab0 + 1L # Renumber labels sequentially lab <- as.integer(factor(lab)) # Convert labels to factor lab <- factor(lab) # Apply to points Y <- X %mark% lab attr(Y, "retainpoints") <- attr(X, "retainpoints") return(Y) } # first break the *network* into connected components L <- domain(X) lab <- connected(L, what="labels") if(length(levels(lab)) == 1) { XX <- solist(X) } else { subsets <- split(seq_len(nvertices(L)), lab) XX <- solist() for(i in seq_along(subsets)) XX[[i]] <- thinNetwork(X, retainvertices=subsets[[i]]) } # now find R-connected components in each dismantled piece YY <- solapply(XX, connected.lpp, R=R, dismantle=FALSE) if(length(YY) == 1) YY <- YY[[1]] return(YY) } text.lpp <- function(x, ...) { co <- coords(x) graphics::text.default(x=co$x, y=co$y, ...) } spatstat.linnet/R/lindirichlet.R0000644000176200001440000001201714144334007016404 0ustar liggesusers#' lindirichlet.R #' #' Dirichlet tessellation on a linear network #' #' $Revision: 1.10 $ $Date: 2020/03/16 10:28:51 $ lineardirichlet <- function(X) { stopifnot(is.lpp(X)) #' unique points, remembering original sequence ii <- which(!duplicated(X)) uX <- X[ii] #' local coordinates coUX <- coords(uX)[, c("seg", "tp")] #' add label from original sequence index coUX$lab <- ii #' reorder oo <- with(coUX, order(seg, tp)) coUXord <- coUX[oo, , drop=FALSE] seg <- coUXord$seg tp <- coUXord$tp #' network data L <- domain(X) nv <- nvertices(L) ns <- nsegments(L) seglen <- lengths_psp(as.psp(L)) from <- L$from to <- L$to #' upper bound on interpoint distance huge <- sum(seglen) #' numerical tolerance for nnwhich tol <- max(sqrt(.Machine$double.eps), diameter(Frame(L))/2^20) #' Find data point (in sorted pattern) nearest to each vertex of network a <- vnnFind(seg, tp, ns, nv, from, to, seglen, huge, tol) vnndist <- a$vnndist vnnwhich <- a$vnnwhich #' index back into original data pattern vnnlab <- coUXord$lab[vnnwhich] #' compute Dirichlet tessellation df <- ldtEngine(nv, ns, from, to, seglen, huge, coUXord, vnndist, vnnwhich, vnnlab) return(lintess(L, df)) } ldtEngine <- function(nv, ns, from, to, seglen, huge, # network coUXord, # point coordinates, sorted vnndist, vnnwhich, # nearest data point for each vertex vnnlab) { #' initialise tessellation data seg <- integer(0) t0 <- numeric(0) t1 <- numeric(0) tile <- integer(0) #' split point data by segment, discarding segments which contain no points fseg <- factor(coUXord$seg, levels=1:ns) blist <- split(coUXord, fseg, drop=TRUE) #' process each segment containing data points for(b in blist) { n <- nrow(b) #' which segment? 
sygmund <- b$seg[[1L]] lenf <- seglen[sygmund] #' segment endpoints A <- from[sygmund] B <- to[sygmund] #' data points (from X) closest to endpoints jA <- vnnlab[A] jB <- vnnlab[B] dA <- vnndist[A] dB <- vnndist[B] #' data points (along segment) closest to endpoints iA <- b$lab[1L] iB <- b$lab[n] #' splits between consecutive data points btp <- b$tp tcut <- if(n < 2) numeric(0) else (btp[-1] + btp[-n])/2 labs <- b$lab #' consider left endpoint if(jA == iA) { #' leftmost data point covers left endpoint tcut <- c(0, tcut) } else { #' cut between left endpoint and leftmost data point dA1 <- lenf * btp[1L] dx <- (dA1 - dA)/2 if(dx > 0) { #' expected! tx <- dx/lenf tcut <- c(0, tx, tcut) labs <- c(jA, labs) } else { #' unexpected tcut <- c(0, tcut) } } #' consider right endpoint if(jB == iB) { #' rightmost data point covers right endpoint tcut <- c(tcut, 1) } else { #' cut between right endpoint and rightmost data point dB1 <- lenf * (1 - btp[n]) dx <- (dB1 - dB)/2 if(dx > 0) { #' expected! tx <- 1 - dx/lenf tcut <- c(tcut, tx, 1) labs <- c(labs, jB) } else { #' unexpected tcut <- c(tcut, 1) } } m <- length(tcut) seg <- c(seg, rep(sygmund, m-1L)) t0 <- c(t0, tcut[-m]) t1 <- c(t1, tcut[-1L]) tile <- c(tile, labs) } df <- data.frame(seg=seg, t0=t0, t1=t1, tile=tile) #' now deal with segments having no data points unloved <- (table(fseg) == 0) if(any(unloved)) { unlovedt0 <- rep(0, 2*sum(unloved)) unlovedt1 <- rep(1, 2*sum(unloved)) unlovedseg <- unlovedtile <- rep(-1, 2*sum(unloved)) counter <- 0 for(sygmund in which(unloved)) { counter <- counter + 1 lenf <- seglen[sygmund] #' segment endpoints A <- from[sygmund] B <- to[sygmund] #' data points (from X) closest to endpoints jA <- vnnlab[A] jB <- vnnlab[B] dA <- vnndist[A] dB <- vnndist[B] if(is.na(jA) || is.na(jB) || jA == jB) { #' entire segment is covered by one tile unlovedtile[counter] <- if(is.na(jA)) jB else jA unlovedseg[counter] <- sygmund } else { #' split somewhere tx <- (dB - dA + lenf)/(2 * lenf) if(tx >= 0 && tx <= 1) { unlovedseg[counter] <- sygmund unlovedtile[counter] <- jA unlovedt1[counter] <- tx counter <- counter + 1 unlovedseg[counter] <- sygmund unlovedtile[counter] <- jB unlovedt0[counter] <- tx } else if(tx < 0) { # weird unlovedseg[counter] <- sygmund unlovedtile[counter] <- jB } else { # weird unlovedseg[counter] <- sygmund unlovedtile[counter] <- jA } } } newdf <- data.frame(seg = unlovedseg[1:counter], t0 = unlovedt0[1:counter], t1 = unlovedt1[1:counter], tile = unlovedtile[1:counter]) df <- rbind(df, newdf) } return(df) } spatstat.linnet/R/densityfunlpp.R0000644000176200001440000000311114144334007016631 0ustar liggesusers## densityfunlpp.R ## Method for 'densityfun' for lpp objects ## ## Copyright (c) Greg McSwiggan and Adrian Baddeley 2017-2020 ## ## $Revision: 1.10 $ $Date: 2020/04/18 08:10:11 $ densityfun.lpp <- function(X, sigma, ..., weights=NULL, nsigma=1, verbose=FALSE) { stopifnot(is.lpp(X)) check.1.real(sigma) if(sigma == Inf) { if(nsigma != 1) stop("nsigma must be equal to 1 when sigma is infinite") return(flatdensityfunlpp(X, weights=weights, disconnect=TRUE)) } else check.finite(sigma) if(!is.null(weights)) check.nvector(weights, npoints(X)) #' L <- as.linnet(X) p <- resolve.heat.steps(sigma, L=L, ..., nsave=nsigma, verbose=verbose) #' internal argument exit <- resolve.1.default(list(exit="no"), list(...)) exit <- match.arg(exit, c("no", "parameters", "setup")) if(exit == "parameters") return(p) setuponly <- (exit == "setup") #' call Greg's solver a <- FDMKERNEL(lppobj=X, weights=weights, dtx=p$dx, 
dtt=p$dt, M=p$niter, nsave=p$nsave, stepnames=list(time="dt", space="dx"), setuponly=setuponly, verbose=verbose) if(setuponly) return(resolve.defaults(a, p)) #' if(nsigma == 1) { #' return smoother with bandwidth sigma result <- a$kernel_fun attr(result, "sigma") <- sigma } else { #' return multiple smoothers with bandwidths sigma * (k-1)/nsigma #' for k = 1, ..., nsigma+1 result <- a$progressfun attr(result, "sigma") <- a$tau } attr(result, "dx") <- a$deltax attr(result, "dt") <- a$deltat return(result) } spatstat.linnet/R/clickjoin.R0000644000176200001440000000147214144334010015674 0ustar liggesusers# # clickjoin.R # # interactive addition/deletion of segments between vertices # clickjoin <- function(X, ..., add=TRUE, m=NULL, join=TRUE) { verifyclass(X, "ppp") if(!(is.logical(join) && length(join) == 1)) stop("join should be a single logical value") plot(X, add=add, pch=16) if(is.null(m)) { m <- matrix(FALSE, npoints(X), npoints(X)) } else { stopifnot(is.matrix(m) && is.logical(m)) stopifnot(all(dim(m) == npoints(X))) from <- as.vector(row(m)[m]) to <- as.vector(col(m)[m]) with(X, segments(x[from], y[from], x[to], y[to])) } while(TRUE) { twoid <- identify(X, plot=FALSE, n=2) n <- length(twoid) if(n == 0) break if(n == 2) { m[twoid[1L],twoid[2L]] <- m[twoid[2L],twoid[1L]] <- join lines(X$x[twoid], X$y[twoid], ...) } } return(m) } spatstat.linnet/R/linearKmulti.R0000644000176200001440000002536314144334007016402 0ustar liggesusers# # linearKmulti # # $Revision: 1.18 $ $Date: 2020/01/11 04:35:04 $ # # K functions for multitype point pattern on linear network # # linearKdot <- function(X, i, r=NULL, ..., correction="Ang") { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) I <- (marx == i) J <- rep(TRUE, npoints(X)) # i.e. all points result <- linearKmulti(X, I, J, r=r, correction=correction, ...) correction <- attr(result, "correction") type <- if(correction == "Ang") "L" else "net" result <- rebadge.as.dotfun(result, "K", type, i) return(result) } linearKcross <- function(X, i, j, r=NULL, ..., correction="Ang") { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) if(missing(j)) j <- lev[2L] else if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark")) # if(i == j) { result <- linearK(X[marx == i], r=r, correction=correction, ...) } else { I <- (marx == i) J <- (marx == j) result <- linearKmulti(X, I, J, r=r, correction=correction, ...) 
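## (for i != j the cross-type estimate is obtained from the general
## multitype engine, using the logical index vectors I and J built above)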
} # rebrand correction <- attr(result, "correction") type <- if(correction == "Ang") "L" else "net" result <- rebadge.as.crossfun(result, "K", type, i, j) return(result) } linearKmulti <- function(X, I, J, r=NULL, ..., correction="Ang") { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) # extract info about pattern np <- npoints(X) lengthL <- volume(domain(X)) # validate I, J if(!is.logical(I) || !is.logical(J)) stop("I and J must be logical vectors") if(length(I) != np || length(J) != np) stop(paste("The length of I and J must equal", "the number of points in the pattern")) if(!any(I)) stop("no points satisfy I") # if(!any(J)) stop("no points satisfy J") nI <- sum(I) nJ <- sum(J) nIandJ <- sum(I & J) # lambdaI <- nI/lengthL # lambdaJ <- nJ/lengthL # compute K denom <- (nI * nJ - nIandJ)/lengthL K <- linearKmultiEngine(X, I, J, r=r, denom=denom, correction=correction, ...) # set appropriate y axis label correction <- attr(K, "correction") type <- if(correction == "Ang") "L" else "net" K <- rebadge.as.crossfun(K, "K", type, "I", "J") attr(K, "correction") <- correction return(K) } # ................ inhomogeneous ............................ linearKdot.inhom <- function(X, i, lambdaI, lambdadot, r=NULL, ..., correction="Ang", normalise=TRUE) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) I <- (marx == i) J <- rep(TRUE, npoints(X)) # i.e. all points # compute result <- linearKmulti.inhom(X, I, J, lambdaI, lambdadot, r=r, correction=correction, normalise=normalise, ...) ## relabel correction <- attr(result, "correction") type <- if(correction == "Ang") "L, inhom" else "net, inhom" result <- rebadge.as.dotfun(result, "K", type, i) attr(result, "correction") <- correction return(result) } linearKcross.inhom <- function(X, i, j, lambdaI, lambdaJ, r=NULL, ..., correction="Ang", normalise=TRUE) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1L] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) if(missing(j)) j <- lev[2L] else if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark")) # if(i == j) { I <- (marx == i) result <- linearKinhom(X[I], lambda=lambdaI, r=r, correction=correction, normalise=normalise, ...) } else { I <- (marx == i) J <- (marx == j) result <- linearKmulti.inhom(X, I, J, lambdaI, lambdaJ, r=r, correction=correction, normalise=normalise, ...) } # rebrand correction <- attr(result, "correction") type <- if(correction == "Ang") "L, inhom" else "net, inhom" result <- rebadge.as.crossfun(result, "K", type, i, j) attr(result, "correction") <- correction return(result) } linearKmulti.inhom <- function(X, I, J, lambdaI, lambdaJ, r=NULL, ..., correction="Ang", normalise=TRUE) { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) # extract info about pattern np <- npoints(X) lengthL <- volume(domain(X)) # # validate I, J if(!is.logical(I) || !is.logical(J)) stop("I and J must be logical vectors") if(length(I) != np || length(J) != np) stop(paste("The length of I and J must equal", "the number of points in the pattern")) if(!any(I)) stop("no points satisfy I") # validate lambda vectors lambdaI <- getlambda.lpp(lambdaI, X, subset=I, ...) 
lambdaJ <- getlambda.lpp(lambdaJ, X, subset=J, ...) # compute K weightsIJ <- outer(1/lambdaI, 1/lambdaJ, "*") denom <- if(!normalise) lengthL else sum(1/lambdaI) K <- linearKmultiEngine(X, I, J, r=r, reweight=weightsIJ, denom=denom, correction=correction, ...) # set appropriate y axis label correction <- attr(K, "correction") type <- if(correction == "Ang") "L, inhom" else "net, inhom" K <- rebadge.as.crossfun(K, "K", type, "I", "J") # set markers for 'envelope' attr(K, "dangerous") <- union(attr(lambdaI, "dangerous"), attr(lambdaJ, "dangerous")) attr(K, "correction") <- correction return(K) } # .............. internal ............................... linearKmultiEngine <- function(X, I, J, ..., r=NULL, reweight=NULL, denom=1, correction="Ang", showworking=FALSE) { # ensure distance information is present X <- as.lpp(X, sparse=FALSE) # extract info about pattern np <- npoints(X) # extract linear network L <- domain(X) W <- Window(L) # determine r values rmaxdefault <- 0.98 * boundingradius(L) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # if(correction == "Ang") { fname <- c("K", "list(L, I, J)") ylab <- quote(K[L,I,J](r)) } else { fname <- c("K", "list(net, I, J)") ylab <- quote(K[net,I,J](r)) } # if(np < 2) { # no pairs to count: return zero function zeroes <- rep(0, length(r)) df <- data.frame(r = r, est = zeroes) K <- fv(df, "r", ylab, "est", . ~ r, c(0, rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname = fname) attr(K, "correction") <- correction return(K) } # ## nI <- sum(I) ## nJ <- sum(J) ## whichI <- which(I) ## whichJ <- which(J) clash <- I & J has.clash <- any(clash) ## compute pairwise distances DIJ <- crossdist(X[I], X[J], check=FALSE) if(has.clash) { ## exclude pairs of identical points from consideration Iclash <- which(clash[I]) Jclash <- which(clash[J]) DIJ[cbind(Iclash,Jclash)] <- Inf } #--- compile into K function --- if(correction == "none" && is.null(reweight)) { # no weights (Okabe-Yamada) K <- compileK(DIJ, r, denom=denom, check=FALSE, fname=fname) K <- rebadge.as.crossfun(K, "K", "net", "I", "J") unitname(K) <- unitname(X) attr(K, "correction") <- correction return(K) } if(correction == "none") { edgewt <- 1 } else { ## inverse m weights (Ang's correction) ## determine tolerance toler <- default.linnet.tolerance(L) ## compute m[i,j] m <- DoCountCrossEnds(X, I, J, DIJ, toler) edgewt <- 1/m } # compute K wt <- if(!is.null(reweight)) edgewt * reweight else edgewt K <- compileK(DIJ, r, weights=wt, denom=denom, check=FALSE, fname=fname) ## rebadge and tweak K <- rebadge.as.crossfun(K, "K", "L", "I", "J") fname <- attr(K, "fname") # tack on theoretical value K <- bind.fv(K, data.frame(theo=r), makefvlabel(NULL, NULL, fname, "pois"), "theoretical Poisson %s") ## unitname(K) <- unitname(X) fvnames(K, ".") <- rev(fvnames(K, ".")) # show working if(showworking) attr(K, "working") <- list(DIJ=DIJ, wt=wt) attr(K, "correction") <- correction return(K) } DoCountCrossEnds <- function(X, I, J, DIJ, toler) { stopifnot(is.lpp(X)) stopifnot(is.logical(I) && is.logical(J)) stopifnot(is.matrix(DIJ)) nI <- sum(I) nJ <- sum(J) whichI <- which(I) whichJ <- which(J) m <- matrix(1, nI, nJ) easy <- list(is.connected=TRUE) L <- domain(X) if(is.connected(L)) { ## network is connected for(k in seq_len(nJ)) { j <- whichJ[k] I.j <- (whichI != j) i.j <- setdiff(whichI, j) m[I.j, k] <- countends(L, X[i.j], DIJ[I.j,k], toler=toler, internal=easy) } } else { ## network is disconnected - split into 
components vlab <- connected(L, what="labels") subsets <- split(seq_len(nvertices(L)), factor(vlab)) for(s in subsets) { ## extract one component and the points falling in it Xs <- thinNetwork(X, retainvertices=s) ns <- npoints(Xs) if(ns >= 2) { Ls <- domain(Xs) ## identify which points of X are involved relevant <- attr(Xs, "retainpoints") Xindex <- which(relevant) ## classify them Isub <- I[relevant] ## Jsub <- J[relevant] ## identify relevant submatrix of DIJ rowsub <- relevant[I] colsub <- relevant[J] ## corresponding indices in X ## rowXindex <- whichI[rowsub] ## colXindex <- whichJ[colsub] ## handle for(k in which(colsub)) { j <- whichJ[k] I.j <- rowsub & (whichI != j) i.j <- Isub & (Xindex != j) m[ I.j, k ] <- countends(Ls, Xs[i.j], DIJ[I.j, k], toler=toler, internal=easy) } } } } if(any(uhoh <- (m == 0) & is.finite(DIJ))) { warning("Internal error: disc boundary count equal to zero") m[uhoh] <- 1 } return(m) } spatstat.linnet/R/linearK.R0000644000176200001440000002513014144334007015317 0ustar liggesusers# # linearK # # $Revision: 1.56 $ $Date: 2020/01/11 04:23:26 $ # # K function for point pattern on linear network # # linearK <- function(X, r=NULL, ..., correction="Ang", ratio=FALSE) { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) np <- npoints(X) lengthL <- volume(domain(X)) denom <- np * (np - 1)/lengthL K <- linearKengine(X, r=r, ..., denom=denom, correction=correction, ratio=ratio) correction <- attr(K, "correction") # set appropriate y axis label switch(correction, Ang = { ylab <- quote(K[L](r)) fname <- c("K", "L") }, none = { ylab <- quote(K[net](r)) fname <- c("K", "net") }) K <- rebadge.fv(K, new.ylab=ylab, new.fname=fname) attr(K, "correction") <- correction return(K) } linearKinhom <- function(X, lambda=NULL, r=NULL, ..., correction="Ang", normalise=TRUE, normpower=1, update=TRUE, leaveoneout=TRUE, ratio=FALSE) { stopifnot(inherits(X, "lpp")) loo.given <- !missing(leaveoneout) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) if(is.null(lambda)) linearK(X, r=r, ..., ratio=ratio, correction=correction) if(normalise) { check.1.real(normpower) stopifnot(normpower >= 1) } lambdaX <- getlambda.lpp(lambda, X, ..., update=update, leaveoneout=leaveoneout, loo.given=loo.given, lambdaname="lambda") invlam <- 1/lambdaX invlam2 <- outer(invlam, invlam, "*") lengthL <- volume(domain(X)) denom <- if(!normalise) lengthL else if(normpower == 1) sum(invlam) else lengthL * (sum(invlam)/lengthL)^normpower K <- linearKengine(X, reweight=invlam2, denom=denom, r=r, correction=correction, ratio=ratio, ...) 
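## K now holds the inhomogeneous estimate, computed with pair weights
## 1/(lambda[i]*lambda[j]) and the denominator chosen above; the remaining
## steps only relabel the fv object and attach attributes.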
# set appropriate y axis label correction <- attr(K, "correction") switch(correction, Ang = { ylab <- quote(K[L, inhom](r)) yexp <- quote(K[list(L, "inhom")](r)) fname <- c("K", "list(L, inhom)") }, none = { ylab <- quote(K[net, inhom](r)) yexp <- quote(K[list(net, "inhom")](r)) fname <- c("K", "list(net, inhom)") }) K <- rebadge.fv(K, new.fname=fname, new.ylab=ylab, new.yexp=yexp) attr(K, "correction") <- correction attr(K, "dangerous") <- attr(lambdaX, "dangerous") return(K) } getlambda.lpp <- function(lambda, X, subset=NULL, ..., update=TRUE, leaveoneout=TRUE, loo.given=TRUE, lambdaname) { missup <- missing(update) if(missing(lambdaname)) lambdaname <- deparse(substitute(lambda)) Y <- if(is.null(subset)) X else X[subset] danger <- TRUE if(is.ppm(lambda) || is.lppm(lambda)) { ## fitted model if(update) { ## refit the model to the full dataset X lambda <- if(is.lppm(lambda)) update(lambda, X) else update(lambda, as.ppp(X)) ## now evaluate lambdaX <- fitted(lambda, dataonly=TRUE, leaveoneout=leaveoneout) ## restrict if required lambdaY <- if(is.null(subset)) lambdaX else lambdaX[subset] ## danger <- FALSE if(missup) warn.once("lin.inhom.update", "The behaviour of linearKinhom and similar functions", "when lambda is an lppm object", "has changed in spatstat 1.41-0,", "and again in spatstat 1.52-0.", "See help(linearKinhom)") } else { if(loo.given && leaveoneout) stop("leave-one-out calculation for fitted models is only available when update=TRUE", call.=FALSE) lambdaY <- predict(lambda, locations=as.data.frame(as.ppp(Y))) } } else { ## lambda is some other kind of object lambdaY <- if(is.vector(lambda)) lambda else if(inherits(lambda, "linfun")) lambda(Y, ...) else if(inherits(lambda, "linim")) lambda[Y, drop=FALSE] else if(is.function(lambda)) { coo <- coords(Y) do.call.matched(lambda, list(x=coo$x, y=coo$y, ...)) } else if(is.im(lambda)) safelookup(lambda, as.ppp(Y)) else stop(paste(lambdaname, "should be", "a numeric vector, function, pixel image, or fitted model")) } if(!is.numeric(lambdaY)) stop(paste("Values of", lambdaname, "are not numeric")) if((nv <- length(lambdaY)) != (np <- npoints(Y))) stop(paste("Obtained", nv, "values of", lambdaname, "but point pattern contains", np, "points")) if(any(lambdaY < 0)) stop(paste("Negative values of", lambdaname, "obtained")) if(any(lambdaY == 0)) stop(paste("Zero values of", lambdaname, "obtained")) if(danger) attr(lambdaY, "dangerous") <- lambdaname return(lambdaY) } linearKengine <- function(X, ..., r=NULL, reweight=NULL, denom=1, correction="Ang", ratio=FALSE, showworking=FALSE) { # ensure distance information is present X <- as.lpp(X, sparse=FALSE) # extract info about pattern np <- npoints(X) # extract linear network L <- domain(X) W <- Window(L) # determine r values rmaxdefault <- 0.98 * boundingradius(L) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # type <- if(correction == "Ang") "L" else "net" fname <- c("K", type) ylab <- substitute(K[type](r), list(type=type)) # if(np < 2) { # no pairs to count: return zero function zeroes <- numeric(length(r)) df <- data.frame(r = r, est = zeroes) K <- ratfv(df, NULL, 0, "r", ylab, "est", . 
~ r, c(0, rmax), c("r", makefvlabel(NULL, "hat", fname)), c("distance argument r", "estimated %s"), fname = fname, ratio=ratio) unitname(K) <- unitname(X) if(correction == "Ang") { # tack on theoretical value K <- bind.ratfv(K, quotient = data.frame(theo=r), denominator = 0, labl = makefvlabel(NULL, NULL, fname, "theo"), desc = "theoretical Poisson %s", ratio = ratio) } attr(K, "correction") <- correction return(K) } # compute pairwise distances D <- pairdist(X) #--- compile into K function --- if(correction == "none" && is.null(reweight)) { # no weights (Okabe-Yamada) K <- compileK(D, r, denom=denom, fname=fname, ratio=ratio) K <- rebadge.fv(K, ylab, fname) unitname(K) <- unitname(X) attr(K, "correction") <- correction return(K) } if(correction == "none") { edgewt <- 1 } else { ## inverse m weights (Wei's correction) ## determine tolerance toler <- default.linnet.tolerance(L) ## compute m[i,j] m <- DoCountEnds(X, D, toler) edgewt <- 1/m } # compute K wt <- if(!is.null(reweight)) edgewt * reweight else edgewt K <- compileK(D, r, weights=wt, denom=denom, fname=fname, ratio=ratio) # tack on theoretical value if(ratio) { K <- bind.ratfv(K, quotient = data.frame(theo = r), denominator = denom, labl = makefvlabel(NULL, NULL, fname, "theo"), desc = "theoretical Poisson %s") } else { K <- bind.fv(K, data.frame(theo=r), makefvlabel(NULL, NULL, fname, "theo"), "theoretical Poisson %s") } K <- rebadge.fv(K, ylab, fname) unitname(K) <- unitname(X) fvnames(K, ".") <- rev(fvnames(K, ".")) # show working if(showworking) attr(K, "working") <- list(D=D, wt=wt) attr(K, "correction") <- correction return(K) } ApplyConnected <- function(X, Engine, r=NULL, ..., rule, auxdata=NULL) { # Apply 'Engine' to each connected component of domain(X) stopifnot(is.function(rule)) # Ensure distance information is present X <- as.lpp(X, sparse=FALSE) L <- domain(X) # check network connectivity br <- boundingradius(L) if(disco <- is.infinite(br)) { # disconnected network XX <- connected(X) LL <- lapply(XX, domain) br <- max(sapply(LL, boundingradius)) } else XX <- NULL # determine r values rmaxdefault <- 0.98 * br breaks <- handle.r.b.args(r, NULL, Window(L), rmaxdefault=rmaxdefault) r <- breaks$r if(!disco) { # single connected network stuff <- rule(X=X, auxdata=auxdata, ...) result <- do.call(Engine, append(list(X=X, r=r), stuff)) return(result) } # disconnected network nsub <- length(XX) results <- anylist() denoms <- numeric(nsub) for(i in seq_len(nsub)) { X.i <- XX[[i]] sub.i <- attr(X.i, "retainpoints") # identifies which points of X aux.i <- if(length(auxdata) == 0) NULL else lapply(auxdata, marksubset, index=sub.i) stuff.i <- rule(X=X.i, auxdata=aux.i, ...) 
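# remember the denominator contributed by this component; it is used
# below as the pooling weight when the component results are combined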
denoms[i] <- stuff.i$denom %orifnull% 1 results[[i]] <- do.call(Engine, append(list(X=X.i, r=r), stuff.i)) } result <- do.call(pool, append(results, list(weights=denoms, relabel=FALSE, variance=FALSE))) return(result) } DoCountEnds <- function(X, D, toler) { stopifnot(is.lpp(X)) stopifnot(is.matrix(D)) nX <- npoints(X) if(nrow(D) != nX) stopifnot(nrow(D) == npoints(X)) if(ncol(D) != nX) stopifnot(ncol(D) == npoints(X)) m <- matrix(1, nX, nX) easy <- list(is.connected=TRUE) L <- domain(X) if(is.connected(L)) { ## network is connected for(j in 1:nX) { m[ -j, j] <- countends(L, X[-j], D[-j,j], toler=toler, internal=easy) } } else { ## network is disconnected - split into components vlab <- connected(L, what="labels") subsets <- split(seq_len(nvertices(L)), factor(vlab)) for(subi in subsets) { ## extract one component and the points falling in it Xsubi <- thinNetwork(X, retainvertices=subi) ni <- npoints(Xsubi) if(ni >= 2) { Lsubi <- domain(Xsubi) ## identify which points of X are involved imap <- which(attr(Xsubi, "retainpoints")) ## handle for(j in seq_len(ni)) { ij <- imap[j] i.j <- imap[-j] m[ i.j, ij ] <- countends(Lsubi, Xsubi[-j], D[i.j, ij], toler=toler, internal=easy) } } } } if(any(uhoh <- (m == 0) & is.finite(D))) { warning("Internal error: disc boundary count equal to zero") m[uhoh] <- 1 } return(m) } spatstat.linnet/R/Math.linimlist.R0000644000176200001440000000337514144334007016635 0ustar liggesusers## ## Math.linimlist.R ## ## $Revision: 1.1 $ $Date: 2020/10/31 08:47:12 $ ## Math.linimlist <- function(x, ...){ solapply(x, .Generic, ...) } Complex.linimlist <- function(z){ solapply(z, .Generic) } Summary.linimlist <- function(..., na.rm=TRUE){ argh <- expandSpecialLists(list(...)) if(length(names(argh)) > 0) { isim <- sapply(argh, is.im) names(argh)[isim] <- "" } do.call(.Generic, c(argh, list(na.rm=na.rm))) } #' Due to the dispatch mechanism, Ops.linim and Ops.linimlist must be identical #' if we want to handle combinations of linimlist and im. 
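#' (When the two operands of a group generic select different S3 methods,
#' R warns about incompatible methods and falls back to internal dispatch;
#' keeping the two methods identical avoids that.)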
#' (See 'Math.linim.R' for the definition of 'LinimOp') Ops.linimlist <- Ops.linim <- function(e1,e2=NULL){ LinimListOp(e1, e2, .Generic) } LinimListOp <- function(e1, e2=NULL, op) { if(is.null(e2)) { #' unary operation result <- if(is.im(e1)) LinimOp(e1, op=op) else solapply(e1, LinimOp, op=op) return(result) } #' binary operation single1 <- !inherits(e1, c("linimlist", "solist")) single2 <- !inherits(e2, c("linimlist", "solist")) if(single1 && single2) return(LinimOp(e1, e2, op)) if(single1 && !single2) { e1list <- rep(list(e1), length(e2)) e2list <- e2 outnames <- names(e2) } else if(!single1 && single2) { e1list <- e1 e2list <- rep(list(e2), length(e1)) outnames <- names(e1) } else { e1list <- e1 e2list <- e2 if(length(e1) != length(e2)) stop(paste("Lists of images have incompatible lengths:", length(e1), "!=", length(e2)), call.=FALSE) outnames <- names(e1) %orifnull% names(e2) } #' compute v <- mapply(LinimOp, e1=unname(e1list), e2=unname(e2list), MoreArgs=list(op=op), SIMPLIFY=FALSE) names(v) <- outnames return(as.solist(v)) } spatstat.linnet/R/rhohatlpp.R0000644000176200001440000000501314144334007015731 0ustar liggesusers#' #' rhohatlpp.R #' #' rhohat.lpp and rhohat.lppm #' #' Moved from rhohat.R to separate file rhohatlpp.R on 16 june 2020 #' #' Copyright (c) Adrian Baddeley 2015-2019 #' GNU Public Licence GPL >= 2.0 rhohat.lpp <- rhohat.lppm <- function(object, covariate, ..., weights=NULL, method=c("ratio", "reweight", "transform"), horvitz=FALSE, smoother=c("kernel", "local", "decreasing", "increasing", "piecewise"), subset=NULL, nd=1000, eps=NULL, random=TRUE, n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95, positiveCI, breaks=NULL) { callstring <- short.deparse(sys.call()) smoother <- match.arg(smoother) method <- match.arg(method) if(missing(positiveCI)) positiveCI <- (smoother == "local") if(missing(covname)) covname <- sensiblevarname(short.deparse(substitute(covariate)), "X") if(is.null(adjust)) adjust <- 1 # validate model if(is.lpp(object)) { X <- object model <- lppm(object, ~1, eps=eps, nd=nd, random=random) reference <- "Lebesgue" modelcall <- NULL } else if(inherits(object, "lppm")) { model <- object X <- model$X reference <- "model" modelcall <- model$call } else stop("object should be of class lpp or lppm") if("baseline" %in% names(list(...))) warning("Argument 'baseline' ignored: not available for ", if(is.lpp(object)) "rhohat.lpp" else "rhohat.lppm") if(is.character(covariate) && length(covariate) == 1) { covname <- covariate switch(covname, x={ covariate <- function(x,y) { x } }, y={ covariate <- function(x,y) { y } }, stop("Unrecognised covariate name") ) covunits <- unitname(X) } else { covunits <- NULL } S <- as.psp(as.linnet(X)) if(!is.null(subset)) S <- S[subset] totlen <- sum(lengths_psp(S)) rhohatEngine(model, covariate, reference, totlen, ..., subset=subset, weights=weights, method=method, horvitz=horvitz, smoother=smoother, resolution=list(nd=nd, eps=eps, random=random), n=n, bw=bw, adjust=adjust, from=from, to=to, bwref=bwref, covname=covname, covunits=covunits, confidence=confidence, positiveCI=positiveCI, breaks=breaks, modelcall=modelcall, callstring=callstring) } spatstat.linnet/R/clicklpp.R0000644000176200001440000000341514144334010015527 0ustar liggesusers#' #' $Revision: 1.1 $ $Date: 2017/06/05 10:31:58 $ #' clicklpp <- local({ clicklpp <- function(L, n=NULL, types=NULL, ..., add=FALSE, main=NULL, hook=NULL) { if(!inherits(L, "linnet")) stop("L should be a linear network", call.=FALSE) instructions <- 
if(!is.null(n)) paste("click", n, "times in window") else paste("add points: click left mouse button in window\n", "exit: press ESC or another mouse button") if(is.null(main)) main <- instructions W <- Window(L) #### single type ######################### if(is.null(types)) { plot(L, add=add, main=main) if(!is.null(hook)) plot(hook, add=TRUE) xy <- if(!is.null(n)) spatstatLocator(n=n, ...) else spatstatLocator(...) ok <- inside.owin(xy, w=W) if((nbad <- sum(!ok)) > 0) warning(paste("Ignored", nbad, ngettext(nbad, "point", "points"), "outside window"), call.=FALSE) X <- as.lpp(xy$x[ok], xy$y[ok], L=L) return(X) } ##### multitype ####################### ftypes <- factor(types, levels=types) #' input points of type 1 X <- getem(ftypes[1L], instructions, n=n, L=L, add=add, ..., pch=1) X <- X %mark% ftypes[1L] #' input points of types 2, 3, ... in turn for(i in 2:length(types)) { Xi <- getem(ftypes[i], instructions, n=n, L=L, add=add, ..., hook=X, pch=i) Xi <- Xi %mark% ftypes[i] X <- superimpose(X, Xi, L=L) } if(!add) plot(X, main="Final pattern") return(X) } getem <- function(i, instr, ...) { main <- paste("Points of type", sQuote(i), "\n", instr) do.call(clicklpp, resolve.defaults(list(...), list(main=main))) } clicklpp }) spatstat.linnet/R/crossdistlpp.R0000644000176200001440000001271314144334007016466 0ustar liggesusers# # crossdistlpp.R # # $Revision: 1.12 $ $Date: 2021/01/07 03:54:27 $ # # crossdist.lpp # Calculates the shortest-path distance from each point of X # to each point of Y, where X and Y are point patterns # on the same linear network. # crossdist.lpp <- function(X, Y, ..., method="C", check=TRUE) { stopifnot(inherits(X, "lpp")) method <- match.arg(method, c("C", "interpreted")) L <- as.linnet(X) if(check) { LY <- as.linnet(Y) if(!identical(L, LY)) stop("X and Y are on different linear networks") } nX <- npoints(X) nY <- npoints(Y) crossdistmat <- matrix(Inf,nX,nY) if(!is.connected(L)) { #' disconnected network lab <- connected(L, what="labels") subsets <- split(seq_len(nvertices(L)), lab) for(subi in subsets) { Xi <- thinNetwork(X, retainvertices=subi) Yi <- thinNetwork(Y, retainvertices=subi) whichX <- attr(Xi, "retainpoints") whichY <- attr(Yi, "retainpoints") crossdistmat[whichX, whichY] <- crossdist.lpp(Xi, Yi, method=method) } return(crossdistmat) } ## ----------- network is connected ------------------------ ## Extract network data Lvert <- L$vertices from <- L$from to <- L$to dpath <- L$dpath nseg <- length(from) sparse <- L$sparse || is.null(dpath) ## Extract point coordinates P <- as.ppp(X) Q <- as.ppp(Y) ## local coordinates cooX <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE) cooY <- coords(Y, local=TRUE, spatial=FALSE, temporal=FALSE) Xseg <- cooX$seg Yseg <- cooY$seg ## if(sparse) { ## new C code for sparse representation tX <- cooX$tp tY <- cooY$tp ## convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L Xseg0 <- Xseg - 1L Yseg0 <- Yseg - 1L ## sort each set of points by increasing segment index ordX <- order(Xseg0, tX) Xseg0 <- Xseg0[ordX] tX <- tX[ordX] ordY <- order(Yseg0, tY) Yseg0 <- Yseg0[ordY] tY <- tY[ordY] ## network info seglen <- lengths_psp(L$lines) huge <- 2 * sum(seglen) tol <- L$toler %orifnull% default.linnet.tolerance(L) ## zz <- .C(SL_linScrossdist, np = as.integer(nX), sp = as.integer(Xseg0), tp = as.double(tX), nq = as.integer(nY), sq = as.integer(Yseg0), tq = as.double(tY), nv = as.integer(Lvert$n), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), seglen = as.double(seglen), huge = as.double(huge), tol 
= as.double(tol), dist = as.double(numeric(nX * nY)), PACKAGE="spatstat.linnet") crossdistmat[ordX, ordY] <- zz$dist } else { switch(method, C = { ## older C code requiring non-sparse representation ## convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L Xsegmap <- Xseg - 1L Ysegmap <- Yseg - 1L zz <- .C(SL_lincrossdist, np = as.integer(nX), xp = as.double(P$x), yp = as.double(P$y), nq = as.integer(nY), xq = as.double(Q$x), yq = as.double(Q$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.integer(nseg), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), psegmap = as.integer(Xsegmap), qsegmap = as.integer(Ysegmap), answer = as.double(numeric(nX * nY)), PACKAGE="spatstat.linnet") crossdistmat <- matrix(zz$answer, nX, nY) }, interpreted = { #' interpreted code #' loop through all pairs of data points for (i in 1:nX) { Xsegi <- Xseg[i] Xi <- P[i] nbi1 <- from[Xsegi] nbi2 <- to[Xsegi] vi1 <- Lvert[nbi1] vi2 <- Lvert[nbi2] dXi1 <- crossdist(Xi, vi1) dXi2 <- crossdist(Xi, vi2) for (j in 1:nY) { Yj <- Q[j] Ysegj <- Yseg[j] if(Xsegi == Ysegj) { #' points i and j lie on the same segment #' use Euclidean distance d <- crossdist(Xi, Yj) } else { #' shortest path from i to j passes through ends of segments nbj1 <- from[Ysegj] nbj2 <- to[Ysegj] vj1 <- Lvert[nbj1] vj2 <- Lvert[nbj2] #' Calculate shortest of 4 possible paths from i to j d1Yj <- crossdist(vj1,Yj) d2Yj <- crossdist(vj2,Yj) d11 <- dXi1 + dpath[nbi1,nbj1] + d1Yj d12 <- dXi1 + dpath[nbi1,nbj2] + d2Yj d21 <- dXi2 + dpath[nbi2,nbj1] + d1Yj d22 <- dXi2 + dpath[nbi2,nbj2] + d2Yj d <- min(d11,d12,d21,d22) } #' store result crossdistmat[i,j] <- d } } } ) } return(crossdistmat) } spatstat.linnet/R/treebranches.R0000644000176200001440000001361614144334007016405 0ustar liggesusers#' #' treebranches.R #' #' Label branches in a tree #' #' $Revision: 1.5 $ $Date: 2019/01/20 05:26:51 $ #' compute branch labels for each *vertex* in the tree L treebranchlabels <- local({ treebranchlabels <- function(L, root=1) { stopifnot(inherits(L, "linnet")) stopifnot(length(root) == 1) V <- L$vertices #' M <- L$m #' assign label to each vertex e <- rep(NA_character_, npoints(V)) #' do root e[root] <- "" #' recurse descendtree(L, root, e) } descendtree <- function(L, at, labels, verbose=FALSE) { if(verbose) cat(paste("Descending from node", at, "\n")) below <- which(L$m[at, ] & is.na(labels)) while(length(below) == 1) { if(verbose) cat(paste("Line from", at, paren(labels[at]), "to", below, "\n")) labels[below] <- labels[at] at <- below below <- which(L$m[at, ] & is.na(labels)) } if(length(below) == 0) { if(verbose) cat("*\n") return(labels) } if(verbose) cat(paste("split into", length(below), "\n")) if(length(below) > 26) stop("Oops - degree > 27") labels[below] <- paste(labels[at], letters[1:length(below)], sep="") for(b in below) labels <- descendtree(L, b, labels) return(labels) } treebranchlabels }) #' Function which will return the branch label associated with #' any point on the network branchlabelfun <- function(L, root=1) { L <- as.linnet(L) vertexLabels <- treebranchlabels(L, root=root) labfrom <- vertexLabels[L$from] labto <- vertexLabels[L$to] segmentLabels <- ifelse(nchar(labfrom) < nchar(labto), labto, labfrom) f <- function(x, y, seg, tp) { segmentLabels[seg] } fL <- linfun(f, L) return(fL) } #' convenience function for use in model formulae begins <- function(x, firstbit) { stopifnot(is.character(firstbit) && length(firstbit) == 1) n <- nchar(firstbit) if(n == 0) rep(TRUE, length(x)) 
else (substr(x, 1, n) == firstbit) } #' extract the sub-tree for a particular label #' e.g. extractbranch(L, "a") extracts everything whose label begins with 'a' extractbranch <- function(X, ...) { UseMethod("extractbranch") } extractbranch.linnet <- function(X, code, labels, ..., which=NULL) { L <- X V <- L$vertices if(!is.null(which)) { stopifnot(is.logical(which)) if(length(which) != npoints(V)) stop("Argument 'which' is the wrong length") vin <- which } else { if(length(labels) != npoints(V)) stop("labels vector is the wrong length") #' which vertices are included #' (a) vertices with the right initial code vin <- (substr(labels, 1, nchar(code)) == code) #' (b) the apex isneighbour <- (rowSums(L$m[,vin]) > 0) apexcode <- if(nchar(code) > 1) substr(code, 1, nchar(code)-1) else "" vin <- vin | (isneighbour & (labels == apexcode)) } #' which edges are included ein <- vin[L$from] & vin[L$to] #' new serial numbers for vertices vId <- cumsum(vin) #' pack up sparse <- L$sparse out <- list(vertices=V[vin], m=L$m[vin,vin], lines=L$lines[ein], from=vId[L$from[ein]], to=vId[L$to[ein]], dpath=if(sparse) NULL else L$dpath[vin,vin], sparse=sparse, window=V$window) class(out) <- c("linnet", class(out)) #' pre-compute bounding radius if(sparse) out$boundingradius <- boundingradius(out) out$toler <- default.linnet.tolerance(out) attr(out, "which") <- vin return(out) } extractbranch.lpp <- function(X, code, labels, ..., which=NULL) { L <- as.linnet(X) #' make sub-network if(missing(code)) code <- NULL if(missing(labels)) labels <- NULL Lnew <- extractbranch(L, code, labels, which=which) #' which vertices are included vin <- attr(Lnew, "which") #' which edges are included ein <- vin[L$from] & vin[L$to] #' which data points are included xin <- ein[coords(X)$seg] #' new serial numbers for edges eId <- cumsum(ein) #' construct subset Xnew <- X[xin] Xnew$domain <- Lnew #' apply new serial numbers to segment map coords(Xnew)$seg <- eId[coords(Xnew)$seg] #' return(Xnew) } deletebranch <- function(X, ...) { UseMethod("deletebranch") } deletebranch.linnet <- function(X, code, labels, ...) { L <- X V <- L$vertices if(length(labels) != npoints(V)) stop("labels vector is the wrong length") #' which vertices are retained vkeep <- (substr(labels, 1, nchar(code)) != code) #' which edges are retained ekeep <- vkeep[L$from] & vkeep[L$to] #' new serial numbers for vertices vId <- cumsum(vkeep) #' pack up sparse <- L$sparse out <- list(vertices=V[vkeep], m=L$m[vkeep,vkeep], lines=L$lines[ekeep], from=vId[L$from[ekeep]], to=vId[L$to[ekeep]], dpath=if(sparse) NULL else L$dpath[vkeep,vkeep], sparse=sparse, window=V$window) class(out) <- c("linnet", class(out)) #' recompute bounding radius if(sparse) out$boundingradius <- boundingradius(out) out$toler <- default.linnet.tolerance(out) attr(out, "which") <- vkeep return(out) } deletebranch.lpp <- function(X, code, labels, ...) 
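#' Method for 'lpp': delete the labelled branch from the network and drop
#' the data points lying on it.
#' Illustrative usage (not run; assumes a tree-shaped network L with a
#' point pattern X on it):
#'   lab <- treebranchlabels(L, root=1)
#'   Xa  <- extractbranch(X, "a", lab)   # points on branch 'a' only
#'   Xb  <- deletebranch(X, "a", lab)    # pattern with branch 'a' removed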
{ #' make sub-network L <- as.linnet(X) Lnew <- deletebranch(L, code=code, labels=labels) #' which vertices are retained vkeep <- attr(Lnew, "which") #' which edges are retained ekeep <- vkeep[L$from] & vkeep[L$to] #' which data points are retained xin <- ekeep[coords(X)$seg] #' new serial numbers for vertices # vId <- cumsum(vkeep) #' new serial numbers for edges eId <- cumsum(ekeep) #' construct subset Xnew <- X[xin] Xnew$domain <- Lnew #' apply new serial numbers to segment map coords(Xnew)$seg <- eId[coords(Xnew)$seg] #' return(Xnew) } treeprune <- function(X, root=1, level=0){ ## collect names of branches to be pruned tb <- treebranchlabels(as.linnet(X), root=root) keep <- (nchar(tb) <= level) Y <- extractbranch(X, which=keep) return(Y) } spatstat.linnet/R/linim.R0000644000176200001440000006065114144334007015051 0ustar liggesusers# # linim.R # # $Revision: 1.80 $ $Date: 2021/08/25 08:19:56 $ # # Image/function on a linear network # linim <- function(L, Z, ..., restrict=TRUE, df=NULL) { L <- as.linnet(L) stopifnot(is.im(Z)) class(Z) <- "im" # prevent unintended dispatch dfgiven <- !is.null(df) if(dfgiven) { stopifnot(is.data.frame(df)) neednames <- c("xc", "yc", "x", "y", "mapXY", "tp", "values") ok <- neednames %in% names(df) dfcomplete <- all(ok) if(!dfcomplete) { #' omission of "values" column is permissible, but not other columns mapnames <- setdiff(neednames, "values") if(!all(mapnames %in% names(df))) { nn <- sum(!ok) stop(paste(ngettext(nn, "A column", "Columns"), "named", commasep(sQuote(neednames[!ok])), ngettext(nn, "is", "are"), "missing from argument", sQuote("df"))) } } } if(restrict) { #' restrict image to pixels actually lying on the network M <- as.mask.psp(as.psp(L), Z) if(dfgiven) { #' ensure all mapped pixels are untouched pos <- nearest.pixel(df$xc, df$yc, Z) pos <- cbind(pos$row, pos$col) M$m[pos] <- TRUE } Z <- Z[M, drop=FALSE] } if(!dfgiven) { # compute the data frame of mapping information xx <- rasterx.im(Z) yy <- rastery.im(Z) mm <- !is.na(Z$v) xx <- as.vector(xx[mm]) yy <- as.vector(yy[mm]) pixelcentres <- ppp(xx, yy, window=as.rectangle(Z), check=FALSE) pixdf <- data.frame(xc=xx, yc=yy) # project pixel centres onto lines p2s <- project2segment(pixelcentres, as.psp(L)) projloc <- as.data.frame(p2s$Xproj) projmap <- as.data.frame(p2s[c("mapXY", "tp")]) # extract values values <- Z[pixelcentres] # bundle df <- cbind(pixdf, projloc, projmap, data.frame(values=values)) } else if(!dfcomplete) { #' look up values pixelcentres <- ppp(df$xc, df$yc, window=as.rectangle(Z), check=FALSE) df$values <- safelookup(Z, pixelcentres) } out <- Z attr(out, "L") <- L attr(out, "df") <- df class(out) <- c("linim", class(out)) return(out) } is.linim <- function(x) { inherits(x, "linim") } print.linim <- function(x, ...) { splat("Image on linear network") L <- attr(x, "L") Lu <- summary(unitname(L)) nsample <- nrow(attr(x, "df")) print(L) NextMethod("print") if(!is.null(nsample)) splat(" Data frame:", nsample, "sample points along network", "\n", "Average density: one sample point per", signif(volume(L)/nsample, 3), Lu$plural, Lu$explain) return(invisible(NULL)) } summary.linim <- function(object, ...) { y <- NextMethod("summary") if("integral" %in% names(y)) y$integral <- integral(object) y$network <- summary(as.linnet(object)) class(y) <- c("summary.linim", class(y)) return(y) } print.summary.linim <- function(x, ...) 
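## Illustrative use of the 'linim' constructor defined at the top of this
## file (not run; assumes the 'simplenet' network from spatstat.data):
##   Z <- as.im(function(x, y) { x + y }, W=Frame(simplenet))
##   X <- linim(simplenet, Z)   # pixel image restricted to the network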
{ splat(paste0(x$type, "-valued"), "pixel image on a linear network") unitinfo <- summary(x$units) pluralunits <- unitinfo$plural sigdig <- getOption('digits') di <- x$dim win <- x$window splat(di[1L], "x", di[2L], "pixel array (ny, nx)") splat("enclosing rectangle:", prange(signif(win$xrange, sigdig)), "x", prange(signif(win$yrange, sigdig)), unitinfo$plural, unitinfo$explain) splat("dimensions of each pixel:", signif(x$xstep, 3), "x", signif(x$ystep, sigdig), pluralunits) if(!is.null(explain <- unitinfo$explain)) splat(explain) splat("Pixel values (on network):") switch(x$type, integer=, real={ splat("\trange =", prange(signif(x$range, sigdig))) splat("\tintegral =", signif(x$integral, sigdig)) splat("\tmean =", signif(x$mean, sigdig)) }, factor={ print(x$table) }, complex={ splat("\trange: Real", prange(signif(x$Re$range, sigdig)), "Imaginary", prange(signif(x$Im$range, sigdig))) splat("\tintegral =", signif(x$integral, sigdig)) splat("\tmean =", signif(x$mean, sigdig)) }, { print(x$summary) }) splat("Underlying network:") print(x$network) return(invisible(NULL)) } plot.linim <- local({ plot.linim <- function(x, ..., style=c("colour", "width"), scale, adjust=1, fatten=0, negative.args=list(col=2), legend=TRUE, leg.side=c("right", "left", "bottom", "top"), leg.sep=0.1, leg.wid=0.1, leg.args=list(), leg.scale=1, zlim, box=FALSE, do.plot=TRUE) { xname <- short.deparse(substitute(x)) style <- match.arg(style) leg.side <- match.arg(leg.side) check.1.real(leg.scale) force(x) if(!missing(fatten)) { check.1.real(fatten) if(fatten != 0 && style == "width") warning("Argument 'fatten' is ignored when style='width'", call.=FALSE) stopifnot(fatten >= 0) } if(missing(zlim) || is.null(zlim)) { zlim <- NULL zliminfo <- list() } else { check.range(zlim) stopifnot(all(is.finite(zlim))) zliminfo <- list(zlim=zlim) } ribstuff <- list(ribbon = legend, ribside = leg.side, ribsep = leg.sep, ribwid = leg.wid, ribargs = leg.args, ribscale = leg.scale) if(style == "colour" || !do.plot) { #' colour style: plot as pixel image if(fatten > 0) { #' first fatten the lines L <- attr(x, "L") S <- as.psp(L) D <- distmap(as.mask.psp(S, xy=x)) fatwin <- levelset(D, fatten) x <- nearestValue(x)[fatwin, drop=FALSE] } return(do.call(plot.im, resolve.defaults(list(quote(x)), list(...), ribstuff, zliminfo, list(main=xname, legend=legend, do.plot=do.plot, box=box)))) } #' width style L <- attr(x, "L") df <- attr(x, "df") Llines <- as.psp(L) W <- as.owin(L) #' ensure function values are numeric vals <- try(as.numeric(df$values)) if(inherits(vals, "try-error")) stop("Function values should be numeric: unable to convert them", call.=FALSE) #' convert non-finite values to zero width vals[!is.finite(vals)] <- 0 df$values <- vals #' plan layout if(legend) { #' use layout procedure in plot.im z <- do.call(plot.im, resolve.defaults(list(quote(x), do.plot=FALSE, ribbon=TRUE), list(...), ribstuff, list(main=xname, valuesAreColours=FALSE))) bb.all <- attr(z, "bbox") bb.leg <- attr(z, "bbox.legend") } else { bb.all <- Frame(W) bb.leg <- NULL } legend <- !is.null(bb.leg) if(legend) { #' expand plot region to accommodate text annotation in legend if(leg.side %in% c("left", "right")) { delta <- 2 * sidelengths(bb.leg)[1] xmargin <- if(leg.side == "right") c(0, delta) else c(delta, 0) bb.all <- grow.rectangle(bb.all, xmargin=xmargin) } } #' initialise plot bb <- do.call.matched(plot.owin, resolve.defaults(list(x=quote(bb.all), type="n"), list(...), list(main=xname)), extrargs="type") if(box) plot(Frame(W), add=TRUE) #' resolve graphics 
parameters for polygons names(negative.args) <- paste0(names(negative.args), ".neg") grafpar <- resolve.defaults(negative.args, list(...), list(col=1), .MatchNull=FALSE) #' rescale values to a plottable range if(is.null(zlim)) zlim <- range(x, finite=TRUE) vr <- range(0, zlim) if(missing(scale)) { maxsize <- mean(distmap(Llines))/2 scale <- maxsize/max(abs(vr)) } df$values <- adjust * scale * df$values/2 #' examine sign of values signtype <- if(vr[1] >= 0) "positive" else if(vr[2] <= 0) "negative" else "mixed" #' split data by segment mapXY <- factor(df$mapXY, levels=seq_len(Llines$n)) dfmap <- split(df, mapXY, drop=TRUE) #' sort each segment's data by position along segment dfmap <- lapply(dfmap, sortalongsegment) #' plot each segment's data Lperp <- angles.psp(Llines) + pi/2 Lfrom <- L$from Lto <- L$to Lvert <- L$vertices Ljoined <- (vertexdegree(L) > 1) #' precompute coordinates of dodecagon dodo <- disc(npoly=12)$bdry[[1L]] #' for(i in seq(length(dfmap))) { z <- dfmap[[i]] segid <- unique(z$mapXY)[1L] xx <- z$x yy <- z$y vv <- z$values #' add endpoints of segment ileft <- Lfrom[segid] iright <- Lto[segid] leftend <- Lvert[ileft] rightend <- Lvert[iright] xx <- c(leftend$x, xx, rightend$x) yy <- c(leftend$y, yy, rightend$y) vv <- c(vv[1L], vv, vv[length(vv)]) rleft <- vv[1L] rright <- vv[length(vv)] ## first add dodecagonal 'joints' if(Ljoined[ileft] && rleft != 0) drawSignedPoly(x=rleft * dodo$x + leftend$x, y=rleft * dodo$y + leftend$y, grafpar, sign(rleft)) if(Ljoined[iright] && rright != 0) drawSignedPoly(x=rright * dodo$x + rightend$x, y=rright * dodo$y + rightend$y, grafpar, sign(rright)) ## Now render main polygon ang <- Lperp[segid] switch(signtype, positive = drawseg(xx, yy, vv, ang, grafpar), negative = drawseg(xx, yy, vv, ang, grafpar), mixed = { ## find zero-crossings xing <- (diff(sign(vv)) != 0) ## excursions excu <- factor(c(0, cumsum(xing))) elist <- split(data.frame(xx=xx, yy=yy, vv=vv), excu) ## plot each excursion for(e in elist) with(e, drawseg(xx, yy, vv, ang, grafpar)) }) } result <- adjust * scale attr(result, "bbox") <- bb if(legend) { attr(result, "bbox.legend") <- bb.leg plotWidthMap(bb.leg = bb.leg, zlim = zlim, phys.scale = adjust * scale, leg.scale = leg.scale, leg.side = leg.side, leg.args = leg.args, grafpar = grafpar) } return(invisible(result)) } drawseg <- function(xx, yy, vv, ang, pars) { ## draw polygon around segment sgn <- sign(mean(vv)) xx <- c(xx, rev(xx)) yy <- c(yy, rev(yy)) vv <- c(vv, -rev(vv)) xx <- xx + cos(ang) * vv yy <- yy + sin(ang) * vv drawSignedPoly(xx, yy, pars, sgn) invisible(NULL) } plot.linim }) sortalongsegment <- function(df) { df[fave.order(df$tp), , drop=FALSE] } as.im.linim <- function(X, ...) { attr(X, "L") <- attr(X, "df") <- NULL class(X) <- "im" if(length(list(...)) > 0) X <- as.im(X, ...) return(X) } as.linim <- function(X, ...) 
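## Generic: convert X to a pixel image on a linear network.
## Methods are dispatched on class(X) (see as.linim.default, as.linim.linfun,
## as.linim.linim). Illustrative usage (not run; assumes 'simplenet'):
##   as.linim(function(x, y) { x }, L=simplenet, delta=0.05)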
{ UseMethod("as.linim") } as.linim.default <- function(X, L, ..., eps = NULL, dimyx = NULL, xy = NULL, delta = NULL, nd = NULL) { stopifnot(inherits(L, "linnet")) Y <- as.im(X, W=Frame(L), ..., eps=eps, dimyx=dimyx, xy=xy) M <- as.mask.psp(as.psp(L), as.owin(Y)) Y[complement.owin(M)] <- NA df <- NULL if(!is.null(delta) || !is.null(nd)) { if(is.null(delta)) delta <- volume(L)/nd df <- pointsAlongNetwork(L, delta) pix <- nearest.valid.pixel(df$x, df$y, Y) df$xc <- Y$xcol[pix$col] df$yc <- Y$yrow[pix$row] df$values <- Y$v[cbind(pix$row, pix$col)] df <- df[,c("xc", "yc", "x", "y", "seg", "tp", "values")] names(df)[names(df) == "seg"] <- "mapXY" } if(is.mask(WL <- Window(L)) && !all(sapply(list(eps, dimyx, xy), is.null))) Window(L, check=FALSE) <- as.mask(WL, eps=eps, dimyx=dimyx, xy=xy) out <- linim(L, Y, df=df, restrict=FALSE) return(out) } pointsAlongNetwork <- function(L, delta) { #' sample points evenly spaced along each segment stopifnot(inherits(L, "linnet")) S <- as.psp(L) ns <- nsegments(S) seglen <- lengths_psp(S) ends <- as.data.frame(S) nsample <- pmax(1, ceiling(seglen/delta)) df <- NULL x0 <- ends$x0 y0 <- ends$y0 x1 <- ends$x1 y1 <- ends$y1 for(i in seq_len(ns)) { nn <- nsample[i] + 1L tcut <- seq(0, 1, length.out=nn) tp <- (tcut[-1] + tcut[-nn])/2 x <- x0[i] * (1-tp) + x1[i] * tp y <- y0[i] * (1-tp) + y1[i] * tp df <- rbind(df, data.frame(x=x, y=y, seg=i, tp=tp)) } return(df) } as.linim.linim <- function(X, ...) { if(length(list(...)) == 0) return(X) Y <- as.linim.default(X, as.linnet(X), ...) return(Y) } # analogue of eval.im eval.linim <- function(expr, envir, harmonize=TRUE, warn=TRUE) { sc <- sys.call() # Get names of all variables in the expression e <- as.expression(substitute(expr)) varnames <- all.vars(e) allnames <- all.names(e, unique=TRUE) funnames <- allnames[!(allnames %in% varnames)] if(length(varnames) == 0) stop("No variables in this expression") # get the values of the variables if(missing(envir)) { envir <- parent.frame() # WAS: sys.parent() } else if(is.list(envir)) { envir <- list2env(envir, parent=parent.frame()) } vars <- mget(varnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL)) funs <- mget(funnames, envir=envir, inherits=TRUE, ifnotfound=list(NULL)) # Find out which variables are (linear) images islinim <- unlist(lapply(vars, inherits, what="linim")) if(!any(islinim)) stop("There are no linear images (class linim) in this expression") # .................................... # Evaluate the pixel values using eval.im # .................................... sc[[1L]] <- as.name('eval.im') sc$envir <- envir Y <- eval(sc) # ......................................... # Then evaluate data frame entries if feasible # ......................................... 
dfY <- NULL linims <- vars[islinim] nlinims <- length(linims) dframes <- lapply(linims, attr, which="df") nets <- lapply(linims, attr, which="L") isim <- unlist(lapply(vars, is.im)) if(!any(isim & !islinim)) { # all images are 'linim' objects # Check that the images refer to the same linear network if(nlinims > 1) { agree <- unlist(lapply(nets[-1L], identical, y=nets[[1L]])) if(!all(agree)) stop(paste("Images do not refer to the same linear network")) } dfempty <- unlist(lapply(dframes, is.null)) if(!any(dfempty)) { # ensure data frames are compatible if(length(dframes) > 1 && ( length(unique(nr <- sapply(dframes, nrow))) > 1 || !allElementsIdentical(dframes, "seg") || !allElementsIdentical(dframes, "tp") )) { # find the one with finest spacing imax <- which.max(nr) # resample the others dframes[-imax] <- lapply(dframes[-imax], resampleNetworkDataFrame, template=dframes[[imax]]) } # replace each image variable by its data frame column of values vars[islinim] <- lapply(dframes, getElement, "values") # now evaluate expression Yvalues <- eval(e, append(vars, funs)) # pack up dfY <- dframes[[1L]] dfY$values <- Yvalues } } result <- linim(nets[[1L]], Y, df=dfY, restrict=FALSE) return(result) } resampleNetworkDataFrame <- function(df, template) { # resample 'df' at the points of 'template' invalues <- df$values insegment <- df$mapXY inteepee <- df$tp out <- template n <- nrow(out) outvalues <- vector(mode = typeof(invalues), length=n) outsegment <- out$mapXY outteepee <- out$tp for(i in seq_len(n)) { relevant <- which(insegment == outsegment[i]) if(length(relevant) > 0) { j <- which.min(abs(inteepee[relevant] - outteepee[i])) outvalues[i] <- invalues[relevant[j]] } } out$values <- outvalues return(out) } as.linnet.linim <- function(X, ...) { attr(X, "L") } "[.linim" <- function(x, i, ..., drop=TRUE) { if(!missing(i) && is.lpp(i)) { n <- npoints(i) result <- vector(mode=typeof(x$v), length=n) if(is.factor(x$v)) { lev <- levels(x$v) result <- factor(result, levels=seq_along(lev), labels=lev) } if(n == 0) return(result) if(!is.null(df <- attr(x, "df"))) { #' use data frame of sample points along network knownseg <- df$mapXY knowntp <- df$tp knownval <- df$values #' extract local coordinates of query points coo <- coords(i) queryseg <- coo$seg querytp <- coo$tp #' match to nearest sample point for(j in 1:n) { relevant <- (knownseg == queryseg[j]) if(!any(relevant)) { result[j] <- NA } else { k <- which.min(abs(knowntp[relevant] - querytp[j])) result[j] <- knownval[relevant][k] } } if(drop && anyNA(result)) result <- result[!is.na(result)] return(result) } #' give up and use pixel image } #' apply subset method for 'im' y <- NextMethod("[") if(!is.im(y)) return(y) # vector of pixel values class(y) <- unique(c("linim", class(y))) #' handle linear network info L <- attr(x, "L") df <- attr(x, "df") #' clip to new window W <- if(!missing(i) && is.owin(i)) i else Window(y) LW <- L[W] df <- df[inside.owin(df$xc, df$yc, W), , drop=FALSE] #' update local coordinates in data frame samplepoints <- ppp(df$x, df$y, window=Frame(W), check=FALSE) a <- project2segment(samplepoints, as.psp(LW)) df$mapXY <- a$mapXY df$tp <- a$tp #' wrap up attr(y, "L") <- LW attr(y, "df") <- df return(y) } "[<-.linim" <- function(x, i, j, value) { y <- NextMethod("[<-") #' extract linear network info L <- attr(x, "L") df <- attr(x, "df") #' propagate *changed* pixel values to sample points pos <- nearest.pixel(df$xc, df$yc, y) pos <- cbind(pos$row, pos$col) yvalue <- y$v[pos] xvalue <- x$v[pos] okx <- !is.na(xvalue) oky <- 
!is.na(yvalue) changed <- (okx != oky) | (okx & oky & yvalue != xvalue) df$values[changed] <- yvalue[changed] #' restrict main pixel image to network m <- as.mask.psp(L, as.mask(y))$m m[pos] <- TRUE y$v[!m] <- NA #' package up attr(y, "L") <- L attr(y, "df") <- df class(y) <- unique(c("linim", class(y))) return(y) } integral.linim <- function(f, domain=NULL, ...){ verifyclass(f, "linim") if(is.tess(domain)) { result <- sapply(tiles(domain), integral.linim, f = f) if(length(dim(result)) > 1) result <- t(result) return(result) } if(!is.null(domain)) f <- f[domain] #' extract data L <- as.linnet(f) ns <- nsegments(L) df <- attr(f, "df") vals <- df$values seg <- factor(df$mapXY, levels=1:ns) #' ensure each segment has at least one sample point nper <- table(seg) if(any(missed <- (nper == 0))) { missed <- unname(which(missed)) mp <- midpoints.psp(as.psp(L)[missed]) #' nearest pixel value valmid <- safelookup(f, mp) #' concatenate factors seg <- unlist(list(seg, factor(missed, levels=1:ns))) vals <- c(vals, valmid) #' update nper <- table(seg) } #' take average of data on each segment if(!is.complex(vals)) vals <- as.numeric(vals) num <- tapplysum(vals, list(seg), na.rm=TRUE) mu <- num/nper #' weighted sum len <- lengths_psp(as.psp(L)) if(anyNA(vals)) { ## p <- as.numeric(by(!is.na(vals), seg, mean, ..., na.rm=TRUE)) ## p[is.na(p)] <- 0 defined <- as.numeric(!is.na(vals)) pnum <- tapplysum(defined, list(seg), na.rm=FALSE) p <- pnum/nper len <- len * p } return(sum(mu * len)) } mean.linim <- function(x, ...) { trap.extra.arguments(...) integral(x)/sum(lengths_psp(as.psp(as.linnet(x)))) } quantile.linim <- function(x, probs = seq(0,1,0.25), ...) { verifyclass(x, "linim") #' extract data df <- attr(x, "df") L <- as.linnet(x) vals <- df$values #' count sample points on each segment seg <- factor(df$mapXY, levels=1:nsegments(L)) nvals <- table(seg) #' calculate weights len <- lengths_psp(as.psp(L)) iseg <- as.integer(seg) wts <- len[iseg]/nvals[iseg] return(weighted.quantile(vals, wts, probs)) } median.linim <- function(x, ...) { trap.extra.arguments(...) return(unname(quantile(x, 0.5))) } shift.linim <- function (X, ...) { verifyclass(X, "linim") Z <- shift(as.im(X), ...) L <- shift(as.linnet(X), ...) v <- getlastshift(L) df <- attr(X, "df") df[,c("xc","yc")] <- shiftxy(df[,c("xc", "yc")], v) df[,c("x","y")] <- shiftxy(df[,c("x", "y")], v) Y <- linim(L, Z, df=df, restrict=FALSE) return(putlastshift(Y, v)) } affine.linim <- function(X, mat = diag(c(1, 1)), vec = c(0, 0), ...) { Z <- affine(as.im(X), mat=mat, vec=vec, ...) L <- affine(as.linnet(X), mat=mat, vec=vec, ...) df <- attr(X, "df") df[,c("xc","yc")] <- affinexy(df[,c("xc", "yc")], mat=mat, vec=vec) df[,c("x","y")] <- affinexy(df[,c("x", "y")], mat=mat, vec=vec) Y <- linim(L, Z, df=df, restrict=FALSE) return(Y) } scalardilate.linim <- function(X, f, ..., origin=NULL) { trap.extra.arguments(..., .Context = "In scalardilate(X,f)") check.1.real(f, "In scalardilate(X,f)") stopifnot(is.finite(f) && f > 0) if (!is.null(origin)) { X <- shift(X, origin = origin) negorig <- getlastshift(X) } else negorig <- c(0, 0) Y <- affine(X, mat = diag(c(f, f)), vec = -negorig) return(Y) } as.data.frame.linim <- function(x, ...) { df <- attr(x, "df") if(!is.na(m <- match("mapXY", colnames(df)))) colnames(df)[m] <- "seg" return(df) } pairs.linim <- function(..., plot=TRUE, eps=NULL) { argh <- list(...) 
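  ## Scatterplot matrix for images on a network: the images are sampled at a
  ## common set of equally-spaced points along the network, and the sampled
  ## values are passed to pairs.default (or to hist.default when only one
  ## image is supplied).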
cl <- match.call() ## unpack single argument which is a list of images if(length(argh) == 1) { arg1 <- argh[[1L]] if(is.list(arg1) && all(sapply(arg1, is.im))) argh <- arg1 } ## identify which arguments are images isim <- sapply(argh, is.im) nim <- sum(isim) if(nim == 0) stop("No images provided") ## separate image arguments from others images <- argh[isim] rest <- argh[!isim] ## identify which arguments are images on a network islinim <- sapply(images, inherits, what="linim") if(!any(islinim)) # shouldn't be here return(pairs.im(argh, plot=plot)) ## determine image names for plotting imnames <- argh$labels %orifnull% names(images) if(length(imnames) != nim || !all(nzchar(imnames))) { #' names not given explicitly callednames <- paste(cl)[c(FALSE, isim, FALSE)] backupnames <- paste0("V", seq_len(nim)) if(length(callednames) != nim) { callednames <- backupnames } else if(any(toolong <- (nchar(callednames) > 15))) { callednames[toolong] <- backupnames[toolong] } imnames <- good.names(imnames, good.names(callednames, backupnames)) } names(images) <- imnames ## choose resolution if(is.null(eps)) { xstep <- min(sapply(images, getElement, name="xstep")) ystep <- min(sapply(images, getElement, name="ystep")) eps <- min(xstep, ystep) } ## extract linear network Z1 <- images[[min(which(islinim))]] L <- as.linnet(Z1) ## construct equally-spaced sample points X <- pointsOnLines(as.psp(L), eps=eps) ## sample each image pixvals <- lapply(images, "[", i=X, drop=FALSE) pixdf <- as.data.frame(pixvals) dont.complain.about(pixdf) ## pairs plot if(plot) { if(nim > 1) { do.call(pairs.default, resolve.defaults(list(x=quote(pixdf)), rest, list(labels=imnames, pch="."))) labels <- resolve.defaults(rest, list(labels=imnames))$labels colnames(pixdf) <- labels } else { xname <- imnames[1L] pixdf1 <- pixdf[,1L] dont.complain.about(pixdf1) do.call(hist.default, resolve.defaults(list(x=quote(pixdf1)), rest, list(main=paste("Histogram of", xname), xlab=xname))) } } class(pixdf) <- unique(c("plotpairsim", class(pixdf))) attr(pixdf, "eps") <- eps return(invisible(pixdf)) } spatstat.linnet/R/envelopelpp.R0000644000176200001440000001752714144334010016270 0ustar liggesusers# # envelopelpp.R # # $Revision: 1.26 $ $Date: 2019/10/14 04:53:06 $ # # Envelopes for 'lpp' objects # # envelope.lpp <- function(Y, fun=linearK, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- linearK if("clipdata" %in% names(list(...))) stop(paste("The argument", sQuote("clipdata"), "is not available for envelope.lpp")) envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) if(!is.null(simulate)) { # ................................................... # Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate # Data pattern is argument Y X <- Y } else if(!fix.n && !fix.marks) { # ................................................... 
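    # (default case: no 'simulate' recipe and no constraint on the number of
    #  points or marks)
    # Illustrative usage (not run):
    #   E <- envelope(X, linearK, nsim=39)   # X an 'lpp' object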
# Realisations of complete spatial randomness # Data pattern X is argument Y # Data pattern determines intensity of Poisson process X <- Y nY <- npoints(Y) Yintens <- intensity(unmark(Y)) Ymarx <- marks(Y) NETWORK <- Y$domain dont.complain.about(nY, Yintens, NETWORK) ## expression that will be evaluated simexpr <- if(is.null(Ymarx)) { #' unmarked point pattern expression(rpoislpp(Yintens, NETWORK)) } else if(is.null(dim(Ymarx))) { #' single column of marks expression({ A <- rpoislpp(Yintens, NETWORK); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j] }) } else { #' multiple columns of marks expression({ A <- rpoislpp(Yintens, NETWORK); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j, , drop=FALSE] }) } # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE) } else if(!fix.marks) { # Fixed number of points, but random locations and marks # Data pattern X is argument Y X <- Y nY <- npoints(Y) Ymarx <- marks(Y) NETWORK <- Y$domain dont.complain.about(nY, NETWORK) # expression that will be evaluated simexpr <- if(is.null(Ymarx)) { ## unmarked expression(runiflpp(nY, NETWORK)) } else if(is.null(dim(Ymarx))) { ## single column of marks expression({ A <- runiflpp(nY, NETWORK); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j] }) } else { ## multiple columns of marks expression({ A <- runiflpp(nY, NETWORK); j <- sample(nY, npoints(A), replace=TRUE); A %mark% Ymarx[j, ,drop=FALSE] }) } # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE) } else { # ................................................... # Randomised locations only; # fixed number of points and fixed marks # Data pattern X is argument Y X <- Y nY <- npoints(Y) Ymarx <- marks(Y) NETWORK <- Y$domain # expression that will be evaluated simexpr <- expression(runiflpp(nY, NETWORK) %mark% Ymarx) dont.complain.about(nY, Ymarx, NETWORK) # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE) } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=FALSE, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA, silent=silent, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } envelope.lppm <- function(Y, fun=linearK, nsim=99, nrank=1, ..., funargs=list(), funYargs=funargs, simulate=NULL, fix.n=FALSE, fix.marks=FALSE, verbose=TRUE, transform=NULL, global=FALSE, ginterval=NULL, use.theory=NULL, alternative=c("two.sided", "less", "greater"), scale=NULL, clamp=FALSE, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, rejectNA=FALSE, silent=FALSE, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- linearK if("clipdata" %in% names(list(...))) stop(paste("The argument", sQuote("clipdata"), "is not available for envelope.pp3")) envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) # Extract data pattern X from fitted model Y X <- data.lppm(Y) if(!is.null(simulate)) { # ................................................... 
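    # Either the user supplies a simulation recipe via 'simulate', or
    # (in the branch below) patterns are simulated from the fitted
    # Poisson model.
    # Illustrative usage (not run):
    #   fit <- lppm(X ~ x);  E <- envelope(fit, linearK, nsim=39)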
# Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate } else { ## ................................................... ## Simulation of the fitted model Y if(!is.poisson(Y)) stop("Simulation of non-Poisson models is not yet implemented") MODEL <- Y NETWORK <- domain(X) lambdaFit <- predict(MODEL) Xmarx <- marks(X) nX <- if(!is.marked(X)) npoints(X) else table(marks(X)) dont.complain.about(NETWORK, Xmarx, nX) #' if(!fix.n && !fix.marks) { #' Unconstrained simulations LMAX <- if(is.im(lambdaFit)) max(lambdaFit) else sapply(lambdaFit, max) dont.complain.about(LMAX) simexpr <- expression(rpoislpp(lambdaFit, NETWORK, lmax=LMAX)) } else if(!fix.marks && is.marked(X)) { #' Fixed total number of points EN <- sapply(lambdaFit, integral) PROB <- EN/sum(EN) dont.complain.about(PROB) simexpr <- expression( rlpp(as.integer(rmultinom(1L, nX, PROB)), lambdaFit) ) } else { #' Fixed number of points of each type simexpr <- expression(rlpp(nX, lambdaFit)) } #' evaluate in THIS environment simrecipe <- simulrecipe(type = "lppm", expr = simexpr, envir = envir.here, csr = FALSE) } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., funargs=funargs, funYargs=funYargs, verbose=verbose, clipdata=FALSE, transform=transform, global=global, ginterval=ginterval, use.theory=use.theory, alternative=alternative, scale=scale, clamp=clamp, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, rejectNA=rejectNA, silent=silent, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } spatstat.linnet/R/lppm.R0000644000176200001440000002537014144334007014710 0ustar liggesusers# # lppm.R # # Point process models on a linear network # # $Revision: 1.46 $ $Date: 2020/12/19 05:25:06 $ # lppm <- function(X, ...) { UseMethod("lppm") } lppm.formula <- function(X, interaction=NULL, ..., data=NULL) { ## remember call callstring <- paste(short.deparse(sys.call()), collapse = "") cl <- match.call() ########### INTERPRET FORMULA ############################## if(!inherits(X, "formula")) stop(paste("Argument 'X' should be a formula")) formula <- X if(spatstat.options("expand.polynom")) formula <- expand.polynom(formula) ## check formula has LHS and RHS. Extract them if(length(formula) < 3) stop(paste("Formula must have a left hand side")) Yexpr <- formula[[2L]] trend <- formula[c(1L,3L)] ## FIT ####################################### thecall <- call("lppm", X=Yexpr, trend=trend, data=data, interaction=interaction) ncall <- length(thecall) argh <- list(...) 
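  ## any additional arguments given to lppm.formula are appended to the
  ## constructed call below, so that they are passed on to lppm.lpp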
nargh <- length(argh) if(nargh > 0) { thecall[ncall + 1:nargh] <- argh names(thecall)[ncall + 1:nargh] <- names(argh) } callenv <- list2env(as.list(data), parent=parent.frame()) result <- eval(thecall, envir=callenv) result$call <- cl result$callstring <- callstring return(result) } lppm.lpp <- function(X, ..., eps=NULL, nd=1000, random=FALSE) { Xname <- short.deparse(substitute(X)) callstring <- paste(short.deparse(sys.call()), collapse = "") cl <- match.call() nama <- names(list(...)) resv <- c("method", "forcefit") if(any(clash <- resv %in% nama)) warning(paste(ngettext(sum(clash), "Argument", "Arguments"), commasep(sQuote(resv[clash])), "must not be used")) stopifnot(inherits(X, "lpp")) Q <- linequad(X, eps=eps, nd=nd, random=random) fit <- ppm(Q, ..., method="mpl", forcefit=TRUE) if(!is.poisson.ppm(fit)) warning("Non-Poisson models currently use Euclidean distance") out <- list(X=X, fit=fit, Xname=Xname, call=cl, callstring=callstring) class(out) <- "lppm" return(out) } is.lppm <- function(x) { inherits(x, "lppm") } # undocumented as.ppm.lppm <- function(object) { object$fit } fitted.lppm <- function(object, ..., dataonly=FALSE, new.coef=NULL, leaveoneout=FALSE) { pfit <- object$fit v <- fitted(pfit, dataonly=dataonly, new.coef=new.coef, leaveoneout=leaveoneout) return(v) } predict.lppm <- function(object, ..., type="trend", locations=NULL, new.coef=NULL) { type <- pickoption("type", type, c(trend="trend", cif="cif", lambda="cif")) X <- object$X fit <- object$fit L <- as.linnet(X) if(!is.null(locations)) { #' locations given; return a vector/matrix of predicted values if(is.lpp(locations)) locations <- as.ppp(locations) values <- predict(fit, locations=locations, type=type, new.coef=new.coef) return(values) } # locations not given; want a pixel image # pixellate the lines Llines <- as.psp(L) linemask <- as.mask.psp(Llines, ...) lineimage <- as.im(linemask) # extract pixel centres xx <- rasterx.mask(linemask) yy <- rastery.mask(linemask) mm <- linemask$m xx <- as.vector(xx[mm]) yy <- as.vector(yy[mm]) pixelcentres <- ppp(xx, yy, window=as.rectangle(linemask), check=FALSE) pixdf <- data.frame(xc=xx, yc=yy) # project pixel centres onto lines p2s <- project2segment(pixelcentres, Llines) projloc <- as.data.frame(p2s$Xproj) projmap <- as.data.frame(p2s[c("mapXY", "tp")]) projdata <- cbind(pixdf, projloc, projmap) # predict at the projected points if(!is.multitype(fit)) { values <- predict(fit, locations=projloc, type=type, new.coef=new.coef) # map to nearest pixels Z <- lineimage Z[pixelcentres] <- values # attach exact line position data df <- cbind(projdata, values) out <- linim(L, Z, df=df, restrict=FALSE) } else { # predict for each type lev <- levels(marks(data.ppm(fit))) out <- list() for(k in seq(length(lev))) { markk <- factor(lev[k], levels=lev) locnk <- cbind(projloc, data.frame(marks=markk)) values <- predict(fit, locations=locnk, type=type, new.coef=new.coef) Z <- lineimage Z[pixelcentres] <- values df <- cbind(projdata, values) out[[k]] <- linim(L, Z, df=df, restrict=FALSE) } out <- as.solist(out) names(out) <- as.character(lev) } return(out) } coef.lppm <- function(object, ...) { coef(object$fit) } print.lppm <- function(x, ...) { splat("Point process model on linear network") print(x$fit) terselevel <- spatstat.options('terse') if(waxlyrical('extras', terselevel)) splat("Original data:", x$Xname) if(waxlyrical('gory', terselevel)) print(as.linnet(x)) return(invisible(NULL)) } summary.lppm <- function(object, ...) 
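## Summary method for a fitted point process model on a network.
## Illustrative usage (not run; 'spiders' is an example 'lpp' dataset in
## spatstat.data):
##   fit <- lppm(spiders ~ x)
##   summary(fit)
##   plot(predict(fit))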
{ splat("Point process model on linear network") print(summary(object$fit)) terselevel <- spatstat.options('terse') if(waxlyrical('extras', terselevel)) splat("Original data:", object$Xname) if(waxlyrical('gory', terselevel)) print(summary(as.linnet(object))) return(invisible(NULL)) } plot.lppm <- function(x, ..., type="trend") { xname <- short.deparse(substitute(x)) y <- predict(x, type=type) dont.complain.about(y) do.call(plot, resolve.defaults(list(quote(y)), list(...), list(main=xname))) } anova.lppm <- function(object, ..., test=NULL) { stuff <- list(object=object, ...) if(!is.na(hit <- match("override", names(stuff)))) { warning("Argument 'override' is outdated and was ignored") stuff <- stuff[-hit] } #' extract ppm objects where appropriate mod <- sapply(stuff, is.lppm) stuff[mod] <- lapply(stuff[mod], getElement, name="fit") #' analysis of deviance or adjusted composite deviance do.call(anova.ppm, append(stuff, list(test=test))) } update.lppm <- function(object, ...) { stopifnot(inherits(object, "lppm")) X <- object$X fit <- object$fit Xname <- object$Xname callframe <- environment(formula(fit)) aargh <- list(...) islpp <- sapply(aargh, is.lpp) if(any(islpp)) { # trap point pattern argument & convert to quadscheme ii <- which(islpp) if((npp <- length(ii)) > 1) stop(paste("Arguments not understood:", npp, "lpp objects given")) X <- aargh[[ii]] aargh[[ii]] <- linequad(X) } isfmla <- sapply(aargh, inherits, what="formula") if(any(isfmla)) { # trap formula pattern argument, update it, evaluate LHS if required jj <- which(isfmla) if((nf <- length(jj)) > 1) stop(paste("Arguments not understood:", nf, "formulae given")) fmla <- aargh[[jj]] fmla <- update(formula(object), fmla) if(!is.null(lhs <- lhs.of.formula(fmla))) { X <- eval(lhs, envir=list2env(list("."=X), parent=callframe)) Qpos <- if(any(islpp)) ii else (length(aargh) + 1L) aargh[[Qpos]] <- linequad(X) } aargh[[jj]] <- rhs.of.formula(fmla) } newfit <- do.call(update.ppm, append(list(fit), aargh), envir=callframe) if(!is.poisson.ppm(newfit)) warning("Non-Poisson models currently use Euclidean distance") out <- list(X=X, fit=newfit, Xname=Xname) class(out) <- "lppm" return(out) } terms.lppm <- function(x, ...) { terms(x$fit, ...) } logLik.lppm <- function(object, ...) { logLik(object$fit, ...) } deviance.lppm <- function(object, ...) { deviance(object$fit, ...) } pseudoR2.lppm <- function(object, ..., keepoffset=TRUE) { dres <- deviance(object, ..., warn=FALSE) nullfmla <- . ~ 1 if(keepoffset && has.offset.term(object)) { off <- attr(model.depends(object), "offset") offterms <- row.names(off)[apply(off, 1, any)] if(length(offterms)) { nullrhs <- paste(offterms, collapse=" + ") nullfmla <- as.formula(paste(". ~ ", nullrhs)) } } nullmod <- update(object, nullfmla, forcefit=TRUE) dnul <- deviance(nullmod, warn=FALSE) return(1 - dres/dnul) } formula.lppm <- function(x, ...) { formula(x$fit, ...) } extractAIC.lppm <- function(fit, ...) { extractAIC(fit$fit, ...) } as.owin.lppm <- function(W, ..., fatal=TRUE) { stopifnot(inherits(W, "lppm")) as.owin(as.linnet(W), ..., fatal=fatal) } Window.lppm <- function(X, ...) { as.owin(X) } data.lppm <- function(object) { object$X } model.images.lppm <- local({ model.images.lppm <- function(object, L=as.linnet(object), ...) { stopifnot(inherits(object, "lppm")) stopifnot(inherits(L, "linnet")) m <- model.images(object$fit, W=as.rectangle(L), ...) 
if(length(m)) { ## restrict images to L type <- if(is.hyperframe(m)) "hyperframe" else if(is.imlist(m)) "imlist" else if(is.list(m) && all(sapply(m, is.im))) "imlist" else stop("Internal error: model.images not understood", call.=FALSE) switch(type, imlist = { ## list of images: convert to list of linims ZL <- netmask(L, template=m[[1L]]) m <- tolinims(m, L=L, imL=ZL) }, hyperframe = { ## hyperframe, each column being a list of images ## extract columns rownam <- row.names(m) m <- as.list(m) ZL <- netmask(L, template=m[[1L]][[1L]]) mm <- lapply(m, tolinims, L=L, imL=ZL) m <- do.call(hyperframe, mm) row.names(m) <- rownam }) } return(m) } netmask <- function(L, template) { as.im(as.mask.psp(as.psp(L), xy=as.mask(template))) } tolinim <- function(x, L, imL) linim(L, eval.im(x * imL), restrict=FALSE) tolinims <- function(x, L, imL) solapply(x, tolinim, L=L, imL=imL) model.images.lppm }) model.matrix.lppm <- function(object, data=model.frame(object, na.action=NULL), ..., keepNA=TRUE) { stopifnot(is.lppm(object)) if(missing(data)) data <- NULL model.matrix(object$fit, data=data, ..., keepNA=keepNA) } model.frame.lppm <- function(formula, ...) { stopifnot(inherits(formula, "lppm")) model.frame(formula$fit, ...) } domain.lppm <- as.linnet.lppm <- function(X, ...) { as.linnet(X$X, ...) } nobs.lppm <- function(object, ...) { npoints(object$X) } is.poisson.lppm <- function(x) { is.poisson(x$fit) } is.stationary.lppm <- function(x) { is.stationary(x$fit) } is.multitype.lppm <- function(X, ...) { is.multitype(X$fit) } is.marked.lppm <- function(X, ...) { is.marked(X$fit) } vcov.lppm <- function(object, ...) { if(!is.poisson(object)) stop("vcov.lppm is only implemented for Poisson models") vcov(object$fit, ...) } valid.lppm <- function(object, ...) { valid(object$fit, ...) } emend.lppm <- function(object, ...) { object$fit <- emend(object$fit, ...) return(object) } response.lppm <- function(object) { data.lppm(object) } spatstat.linnet/R/evalcovarlppm.R0000644000176200001440000002313614144334010016603 0ustar liggesusers#' #' evalcovarlppm.R #' #' evalCovar method for class lppm #' #' $Revision: 1.6 $ $Date: 2021/08/25 08:31:48 $ evalCovar.lppm <- local({ evalCovar.lppm <- function(model, covariate, ..., lambdatype=c("cif", "trend", "intensity"), eps=NULL, dimyx=NULL, xy=NULL, delta=NULL, nd=NULL, interpolate=TRUE, jitter=TRUE, jitterfactor=1, modelname=NULL, covname=NULL, dataname=NULL, subset=NULL) { lambdatype <- match.arg(lambdatype) #' evaluate covariate values at data points and at pixels ispois <- is.poisson(model) csr <- ispois && is.stationary(model) #' arguments controlling resolution pixels.given <- !(is.null(eps) && is.null(dimyx) && is.null(xy)) sampling.given <- !(is.null(delta) && is.null(nd)) resolution.given <- pixels.given || sampling.given #' determine names if(is.null(modelname)) modelname <- if(csr) "CSR" else short.deparse(substitute(model)) if(is.null(covname)) { covname <- singlestring(short.deparse(substitute(covariate))) if(is.character(covariate)) covname <- covariate } if(is.null(dataname)) dataname <- model$Xname info <- list(modelname=modelname, covname=covname, dataname=dataname, csr=csr, ispois=ispois, spacename="linear network") #' convert character covariate to function if(is.character(covariate)) { #' One of the characters 'x' or 'y' #' Turn it into a function. 
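      #' ('x' is mapped by the switch below to xcoordfun = function(x,y,m){x},
      #'  and 'y' to ycoordfun; both are defined later in this file)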
ns <- length(covariate) if(ns == 0) stop("covariate is empty") if(ns > 1) stop("more than one covariate specified") covname <- covariate covariate <- switch(covariate, x=xcoordfun, y=ycoordfun, stop(paste("Unrecognised covariate", dQuote(covariate)))) } #' extract model components X <- model$X fit <- model$fit #' L <- as.linnet(X) Q <- quad.ppm(fit) #' restrict to subset if required if(!is.null(subset)) { X <- X[subset] Q <- Q[subset] } isdat <- is.data(Q) U <- union.quad(Q) wt <- w.quad(Q) #' evaluate covariate if(!is.marked(model)) { #' ................... unmarked ....................... if(is.im(covariate)) { if(is.linim(covariate)) { type <- "linim" Zimage <- covariate if(resolution.given) Zimage <- as.linim(Zimage, eps=eps, dimyx=dimyx, xy=xy, delta=delta, nd=nd) } else { type <- "im" Zimage <- as.linim(covariate, L, eps=eps, dimyx=dimyx, xy=xy, delta=delta, nd=nd) } if(!interpolate) { #' look up covariate values at quadrature points Zvalues <- safelookup(covariate, U) } else { #' evaluate at quadrature points by interpolation Zvalues <- interp.im(covariate, U$x, U$y) #' fix boundary glitches if(any(uhoh <- is.na(Zvalues))) Zvalues[uhoh] <- safelookup(covariate, U[uhoh]) } #' extract data values ZX <- Zvalues[isdat] } else if(is.function(covariate)) { type <- "function" Zimage <- as.linim(covariate, L, eps=eps, dimyx=dimyx, xy=xy, delta=delta, nd=nd) #' evaluate exactly at quadrature points Zvalues <- covariate(U$x, U$y) if(!all(is.finite(Zvalues))) warning("covariate function returned NA or Inf values") #' extract data values ZX <- Zvalues[isdat] #' collapse function body to single string covname <- singlestring(covname) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("The covariate should be", "an image, a function(x,y)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) #' corresponding fitted [conditional] intensity values lambda <- as.vector(predict(model, locations=U, type=lambdatype)) } else { #' ................... marked ....................... 
if(!is.multitype(model)) stop("Only implemented for multitype models (factor marks)") marx <- marks(U, dfok=FALSE) possmarks <- levels(marx) #' single image: replicate if(is.im(covariate)) { covariate <- rep(list(covariate), length(possmarks)) names(covariate) <- possmarks } #' if(is.list(covariate) && all(sapply(covariate, is.im))) { #' list of images if(length(covariate) != length(possmarks)) stop("Number of images does not match number of possible marks") #' determine type of data islinim <- sapply(covariate, is.linim) type <- if(all(islinim)) "linim" else "im" Zimage <- as.solist(covariate) #' convert 2D pixel images to 'linim' Zimage[!islinim] <- lapply(Zimage[!islinim], as.linim, L=L, eps=eps, dimyx=dimyx, xy=xy, delta=delta, nd=nd) if(resolution.given) Zimage[islinim] <- lapply(Zimage[!islinim], as.linim, eps=eps, dimyx=dimyx, xy=xy, delta=delta, nd=nd) #' evaluate covariate at each data point by interpolation Zvalues <- numeric(npoints(U)) for(k in seq_along(possmarks)) { ii <- (marx == possmarks[k]) covariate.k <- covariate[[k]] if(!interpolate) { #' direct lookup values <- safelookup(covariate.k, U[ii]) } else { #' interpolation values <- interp.im(covariate.k, x=U$x[ii], y=U$y[ii]) #' fix boundary glitches if(any(uhoh <- is.na(values))) values[uhoh] <- safelookup(covariate.k, U[ii][uhoh]) } Zvalues[ii] <- values } #' extract data values ZX <- Zvalues[isdat] #' corresponding fitted [conditional] intensity values lambda <- predict(model, locations=U, type=lambdatype) if(length(lambda) != length(Zvalues)) stop("Internal error: length(lambda) != length(Zvalues)") } else if(is.function(covariate)) { type <- "function" #' evaluate exactly at quadrature points Zvalues <- functioncaller(x=U$x, y=U$y, m=marx, f=covariate, ...) #' functioncaller: function(x,y,m,f,...) { f(x,y,m,...) } #' extract data values ZX <- Zvalues[isdat] #' corresponding fitted [conditional] intensity values lambda <- predict(model, locations=U, type=lambdatype) if(length(lambda) != length(Zvalues)) stop("Internal error: length(lambda) != length(Zvalues)") #' images Zimage <- list() for(k in seq_along(possmarks)) Zimage[[k]] <- as.linim(functioncaller, L=L, m=possmarks[k], f=covariate, eps=eps, dimyx=dimyx, xy=xy, delta=delta, nd=nd) #' collapse function body to single string covname <- singlestring(covname) } else if(is.null(covariate)) { stop("The covariate is NULL", call.=FALSE) } else stop(paste("For a multitype point process model,", "the covariate should be an image, a list of images,", "a function(x,y,m)", "or one of the characters", sQuote("x"), "or", sQuote("y")), call.=FALSE) } #' .......................................................... 
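  #' At this point: 'Zvalues' = covariate at all quadrature points,
  #' 'ZX' = covariate at the data points, 'lambda' = fitted intensity/cif
  #' at the quadrature points. The remaining steps optionally jitter the
  #' covariate values, compute fitted values at the data points and the
  #' fitted intensity image(s), and package everything for the calling
  #' test function.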
#' apply jittering to avoid ties if(jitter) { ZX <- jitter(ZX, factor=jitterfactor) Zvalues <- jitter(Zvalues, factor=jitterfactor) } lambdaname <- if(is.poisson(model)) "intensity" else lambdatype lambdaname <- paste("the fitted", lambdaname) check.finite(lambda, xname=lambdaname, usergiven=FALSE) check.finite(Zvalues, xname="the covariate", usergiven=TRUE) #' lambda values at data points lambdaX <- predict(model, locations=X, type=lambdatype) #' lambda image(s) lambdaimage <- predict(model, type=lambdatype) #' restrict image to subset if(!is.null(subset)) { Zimage <- applySubset(Zimage, subset) lambdaimage <- applySubset(lambdaimage, subset) } #' wrap up values <- list(Zimage = Zimage, lambdaimage = lambdaimage, Zvalues = Zvalues, lambda = lambda, lambdaX = lambdaX, weights = wt, ZX = ZX, type = type) return(list(values=values, info=info)) } xcoordfun <- function(x,y,m){x} ycoordfun <- function(x,y,m){y} functioncaller <- function(x,y,m,f,...) { nf <- length(names(formals(f))) if(nf < 2) stop("Covariate function must have at least 2 arguments") value <- if(nf == 2) f(x,y) else if(nf == 3) f(x,y,m) else f(x,y,m,...) return(value) } applySubset <- function(X, subset) { if(is.im(X)) return(X[subset, drop=FALSE]) if(is.imlist(X)) return(solapply(X, "[", i=subset, drop=FALSE)) return(NULL) } evalCovar.lppm }) spatstat.linnet/R/nnfunlpp.R0000644000176200001440000000316714144334007015600 0ustar liggesusers# # nnfunlpp.R # # method for 'nnfun' for class 'lpp' # # $Revision: 1.3 $ $Date: 2019/09/16 10:14:18 $ # nnfun.lpp <- local({ nnfun.lpp <- function(X, ..., k=1, value=c("index", "mark")) { stopifnot(inherits(X, "lpp")) force(X) force(k) value <- match.arg(value) L <- as.linnet(X) switch(value, index = { fi <- function(x, y=NULL, seg=NULL, tp=NULL, ...) { ## L is part of the environment Y <- as.lpp(x=x, y=y, seg=seg, tp=tp, L=L) i <- nncross.lpp(Y, X, what="which", k=k) return(i) } f <- linfun(fi, L) }, mark = { stopifnot(is.marked(X)) marx <- as.data.frame(marks(X))[,1] fm <- function(x, y=NULL, seg=NULL, tp=NULL, ...) { Y <- as.lpp(x=x, y=y, seg=seg, tp=tp, L=L) i <- nncross.lpp(Y, X, what="which", k=k) return(marx[i]) } f <- linfun(fm, L) }) attr(f, "explain") <- uitleggen return(f) } uitleggen <- function(x, ...) { env <- environment(attr(x, "f")) X <- get("X", envir=env) k <- get("k", envir=env) if(identical(k, 1)) { cat("Nearest-neighbour function for lpp object\n") } else { cat("k-th nearest neighbour function for lpp object\n") cat(paste("k =", commasep(k), "\n")) } print(X) v <- mget("value", envir=env, ifnotfound=list(NULL))[[1L]] splat("Function returns the", if(identical(v, "mark")) "mark value" else "index", "of the neighbour") } nnfun.lpp }) spatstat.linnet/R/linnet.R0000644000176200001440000004452214144334007015231 0ustar liggesusers# # linnet.R # # Linear networks # # $Revision: 1.80 $ $Date: 2021/01/07 03:53:44 $ # # An object of class 'linnet' defines a linear network. # It includes the following components # # vertices (ppp) vertices of network # # m (matrix) adjacency matrix # # lines (psp) edges of network # # dpath (matrix) matrix of shortest path distances # between each pair of vertices # # from, to (vectors) map from edges to vertices. 
# The endpoints of the i-th segment lines[i] # are vertices[from[i]] and vertices[to[i]] # # # FUNCTIONS PROVIDED: # linnet creates an object of class "linnet" from data # print.linnet print an object of class "linnet" # plot.linnet plot an object of class "linnet" # # Make an object of class "linnet" from the minimal data linnet <- function(vertices, m, edges, sparse=FALSE, warn=TRUE) { if(missing(m) && missing(edges)) stop("specify either m or edges") if(!missing(m) && !missing(edges)) stop("do not specify both m and edges") # validate inputs stopifnot(is.ppp(vertices)) nv <- npoints(vertices) if(nv <= 1) { m <- matrix(FALSE, nv, nv) from <- to <- integer(0) } else if(!missing(m)) { # check logical matrix or logical sparse matrix if(!is.matrix(m) && !inherits(m, c("lgCMatrix", "lgTMatrix"))) stop("m should be a matrix or sparse matrix") stopifnot(is.logical(m) && isSymmetric(m)) if(nrow(m) != vertices$n) stop("dimensions of matrix m do not match number of vertices") if(any(diag(m))) { warning("diagonal entries of the matrix m should not be TRUE; ignored") diag(m) <- FALSE } sparse <- !is.matrix(m) ## determine 'from' and 'to' vectors ij <- which(m, arr.ind=TRUE) ij <- ij[ ij[,1L] < ij[,2L], , drop=FALSE] from <- ij[,1L] to <- ij[,2L] } else { ## check (from, to) pairs stopifnot(is.matrix(edges) && ncol(edges) == 2) if(any((edges %% 1) != 0)) stop("Entries of edges list should be integers") if(any(self <- (edges[,1L] == edges[,2L]))) { warning("edge list should not join a vertex to itself; ignored") edges <- edges[!self, , drop=FALSE] } np <- npoints(vertices) if(any(edges > np)) stop("index out-of-bounds in edges list") from <- edges[,1L] to <- edges[,2L] ## avoid duplication in either sense up <- (from < to) ee <- cbind(ifelse(up, from , to), ifelse(up, to, from)) if(anyDuplicated(ee)) { warning("Duplicated segments were ignored", call.=FALSE) ok <- !duplicated(ee) from <- from[ok] to <- to[ok] } ## convert to adjacency matrix if(!sparse) { m <- matrix(FALSE, np, np) m[edges] <- TRUE } else m <- sparseMatrix(i=from, j=to, x=TRUE, dims=c(np, np)) m <- m | t(m) } # create line segments xx <- vertices$x yy <- vertices$y lines <- psp(xx[from], yy[from], xx[to], yy[to], window=vertices$window, check=FALSE) # tolerance toler <- default.linnet.tolerance(lines) ## pack up out <- list(vertices=vertices, m=m, lines=lines, from=from, to=to, sparse=sparse, window=vertices$window, toler=toler) class(out) <- c("linnet", class(out)) ## finish ? if(sparse) return(out) # compute matrix of distances between adjacent vertices n <- nrow(m) d <- matrix(Inf, n, n) diag(d) <- 0 d[m] <- pairdist(vertices)[m] ## now compute shortest-path distances between each pair of vertices out$dpath <- dpath <- dist2dpath(d) if(warn && any(is.infinite(dpath))) warning("Network is not connected", call.=FALSE) # pre-compute bounding radius out$boundingradius <- boundingradius(out) return(out) } print.linnet <- function(x, ...) { nv <- x$vertices$n nl <- x$lines$n splat("Linear network with", nv, ngettext(nv, "vertex", "vertices"), "and", nl, ngettext(nl, "line", "lines")) if(!is.null(br <- x$boundingradius) && is.infinite(br)) splat("[Network is not connected]") print(as.owin(x), prefix="Enclosing window: ") return(invisible(NULL)) } summary.linnet <- function(object, ...) 
{ deg <- vertexdegree(object) sparse <- object$sparse %orifnull% is.null(object$dpath) result <- list(nvert = object$vertices$n, nline = object$lines$n, nedge = sum(deg)/2, unitinfo = summary(unitname(object)), totlength = sum(lengths_psp(object$lines)), maxdegree = max(deg), ncomponents = length(levels(connected(object, what="labels"))), win = as.owin(object), sparse = sparse) if(!sparse) { result$diam <- diameter(object) result$boundrad <- boundingradius(object) } result$toler <- object$toler class(result) <- c("summary.linnet", class(result)) result } print.summary.linnet <- function(x, ...) { dig <- getOption('digits') with(x, { splat("Linear network with", nvert, ngettext(nvert, "vertex", "vertices"), "and", nline, ngettext(nline, "line", "lines")) splat("Total length", signif(totlength, dig), unitinfo$plural, unitinfo$explain) splat("Maximum vertex degree:", maxdegree) if(sparse) splat("[Sparse matrix representation]") else splat("[Non-sparse matrix representation]") if(ncomponents > 1) { splat("Network is disconnected: ", ncomponents, "connected components") } else { splat("Network is connected") if(!sparse) { splat("Diameter:", signif(diam, dig), unitinfo$plural) splat("Bounding radius:", signif(boundrad, dig), unitinfo$plural) } } if(!is.null(x$toler)) splat("Numerical tolerance:", signif(x$toler, dig), unitinfo$plural) print(win, prefix="Enclosing window: ") }) return(invisible(NULL)) } plot.linnet <- function(x, ..., main=NULL, add=FALSE, vertices=FALSE, window=FALSE, do.plot=TRUE) { if(is.null(main)) main <- short.deparse(substitute(x)) stopifnot(inherits(x, "linnet")) bb <- Frame(x) if(!do.plot) return(invisible(bb)) lines <- as.psp(x) if(!add) { # initialise new plot w <- as.owin(lines) if(window) plot(w, ..., main=main) else plot(w, ..., main=main, type="n") } # plot segments and (optionally) vertices do.call(plot, resolve.defaults(list(x=quote(lines), show.all=FALSE, add=TRUE, main=main), list(...))) if(vertices) plot(x$vertices, add=TRUE) return(invisible(bb)) } as.psp.linnet <- function(x, ..., fatal=TRUE) { verifyclass(x, "linnet", fatal=fatal) return(x$lines) } vertices.linnet <- function(w) { verifyclass(w, "linnet") return(w$vertices) } nvertices.linnet <- function(x, ...) { verifyclass(x, "linnet") return(x$vertices$n) } nsegments.linnet <- function(x) { return(x$lines$n) } Window.linnet <- function(X, ...) { return(X$window) } "Window<-.linnet" <- function(X, ..., check=TRUE, value) { if(check) { X <- X[value] } else { X$window <- value X$lines$window <- value X$vertices$window <- value } return(X) } as.owin.linnet <- function(W, ...) { return(Window(W)) } as.linnet <- function(X, ...) 
{ UseMethod("as.linnet") } as.linnet.linnet <- function(X, ..., sparse, maxsize=30000) { if(missing(sparse)) return(X) if(is.null(X$sparse)) X$sparse <- is.null(X$dpath) if(sparse && !(X$sparse)) { # delete distance matrix X$dpath <- NULL # convert adjacency matrix to sparse matrix X$m <- as(X$m, "sparseMatrix") X$sparse <- TRUE } else if(!sparse && X$sparse) { # convert adjacency matrix to path-distance matrix nv <- nvertices(X) if(nv > maxsize) { stop(paste("Unable to create a matrix of size", nv, "x", nv, paren(paste("max permitted size", maxsize, "x", maxsize))), call.=FALSE) } X$m <- m <- as.matrix(X$m) edges <- which(m, arr.ind=TRUE) from <- edges[,1L] to <- edges[,2L] # compute distances to one-step neighbours n <- nrow(m) d <- matrix(Inf, n, n) diag(d) <- 0 coo <- coords(vertices(X)) d[edges] <- sqrt(rowSums((coo[from, 1:2] - coo[to, 1:2])^2)) # compute shortest path distance matrix X$dpath <- dist2dpath(d) # compute bounding radius X$boundingradius <- boundingradius(X) X$sparse <- FALSE } else if(!sparse) { # possibly update internals X$boundingradius <- boundingradius(X) } # possibly update internals X$circumradius <- NULL X$toler <- default.linnet.tolerance(X) return(X) } as.linnet.psp <- function(X, ..., eps, sparse=FALSE) { X <- selfcut.psp(X) camefrom <- attr(X, "camefrom") V <- unique(endpoints.psp(X)) if(missing(eps) || is.null(eps)) { eps <- sqrt(.Machine$double.eps) * diameter(Frame(X)) } else { check.1.real(eps) stopifnot(eps >= 0) } if(eps > 0 && minnndist(V) <= eps) { gV <- marks(connected(V, eps)) xx <- as.numeric(by(V$x, gV, mean)) yy <- as.numeric(by(V$y, gV, mean)) V <- ppp(xx, yy, window=Window(X)) } first <- endpoints.psp(X, "first") second <- endpoints.psp(X, "second") from <- nncross(first, V, what="which") to <- nncross(second, V, what="which") if(any(reverse <- (from > to))) { newfrom <- ifelse(reverse, to, from) newto <- ifelse(reverse, from, to) from <- newfrom to <- newto } fromto <- cbind(from, to) nontrivial <- (from != to) & !duplicated(fromto) join <- fromto[nontrivial, , drop=FALSE] result <- linnet(V, edges=join, sparse=sparse) if(is.marked(X)) marks(result$lines) <- marks(X[nontrivial]) attr(result, "camefrom") <- camefrom[nontrivial] return(result) } unitname.linnet <- function(x) { unitname(x$window) } "unitname<-.linnet" <- function(x, value) { w <- x$window v <- x$vertices l <- x$lines unitname(w) <- unitname(v) <- unitname(l) <- value x$window <- w x$vertices <- v x$lines <- l return(x) } diameter.linnet <- function(x) { stopifnot(inherits(x, "linnet")) dpath <- x$dpath if(is.null(dpath)) return(NULL) else return(max(0, dpath)) } volume.linnet <- function(x) { sum(lengths_psp(x$lines)) } vertexdegree <- function(x) { verifyclass(x, "linnet") return(rowSums(x$m)) } circumradius.linnet <- function(x, ...) { .Deprecated("boundingradius.linnet") boundingradius.linnet(x, ...) } boundingradius.linnet <- function(x, ...) 
{ stopifnot(inherits(x, "linnet")) cr <- x$boundingradius %orifnull% x$circumradius if(!is.null(cr)) return(cr) dpath <- x$dpath if(is.null(dpath)) return(NULL) if(any(is.infinite(dpath))) return(Inf) if(nrow(dpath) <= 1) return(max(0,dpath)) from <- x$from to <- x$to lines <- x$lines nseg <- lines$n leng <- lengths_psp(lines) if(spatstat.options("Clinearradius")) { fromC <- from - 1L toC <- to - 1L nv <- npoints(vertices(x)) huge <- sum(leng) z <- .C(SL_linearradius, ns = as.integer(nseg), from = as.integer(fromC), to = as.integer(toC), lengths = as.double(leng), nv = as.integer(nv), dpath = as.double(dpath), huge = as.double(huge), result = as.double(numeric(1)), PACKAGE="spatstat.linnet") return(z$result) } sA <- sB <- matrix(Inf, nseg, nseg) for(i in 1:nseg) { # endpoints of segment i A <- from[i] B <- to[i] AB <- leng[i] sA[i,i] <- sB[i,i] <- AB/2 for(j in (1:nseg)[-i]) { # endpoints of segment j C <- from[j] D <- to[j] CD <- leng[j] AC <- dpath[A,C] AD <- dpath[A,D] BC <- dpath[B,C] BD <- dpath[B,D] # max dist from A to any point in segment j sA[i,j] <- if(AD > AC + CD) AC + CD else if(AC > AD + CD) AD + CD else (AC + AD + CD)/2 # max dist from B to any point in segment j sB[i,j] <- if(BD > BC + CD) BC + CD else if(BC > BD + CD) BD + CD else (BC + BD + CD)/2 } } # max dist from each A to any point in another segment mA <- apply(sA, 1, max) # max dist from each B to any point in another segment mB <- apply(sB, 1, max) # min of these min(mA, mB) }
#################################################### # affine transformations ####################################################
scalardilate.linnet <- function(X, f, ...) { trap.extra.arguments(..., .Context="In scalardilate(X,f)") check.1.real(f, "In scalardilate(X,f)") stopifnot(is.finite(f) && f > 0) Y <- X Y$vertices <- scalardilate(X$vertices, f=f) Y$lines <- scalardilate(X$lines, f=f) Y$window <- scalardilate(X$window, f=f) if(!is.null(X$dpath)) { Y$dpath <- f * X$dpath Y$boundingradius <- f * (X$boundingradius %orifnull% X$circumradius) Y$circumradius <- NULL } if(!is.null(X$toler)) Y$toler <- makeLinnetTolerance(f * X$toler) return(Y) }
affine.linnet <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) { verifyclass(X, "linnet") if(length(unique(eigen(mat)$values)) == 1) { # transformation is a similarity: distances scale by a constant factor scal <- sqrt(abs(det(mat))) Y <- X Y$vertices <- affine(X$vertices, mat=mat, vec=vec, ...) Y$lines <- affine(X$lines, mat=mat, vec=vec, ...) Y$window <- affine(X$window, mat=mat, vec=vec, ...) if(!is.null(X$dpath)) { Y$dpath <- scal * X$dpath Y$boundingradius <- scal * (X$boundingradius %orifnull% X$circumradius) Y$circumradius <- NULL } if(!is.null(Y$toler)) Y$toler <- makeLinnetTolerance(scal * Y$toler) } else { # general case vertices <- affine(X$vertices, mat=mat, vec=vec, ...) Y <- linnet(vertices, edges=cbind(X$from, X$to)) } return(Y) }
shift.linnet <- function(X, vec=c(0,0), ..., origin=NULL) { verifyclass(X, "linnet") Y <- X if(!is.null(origin)) { if(!missing(vec)) warning("argument vec ignored; argument origin has precedence") locn <- interpretAsOrigin(origin, Window(X)) vec <- -locn } Y$window <- W <- shift(X$window, vec=vec, ...) v <- getlastshift(W) Y$vertices <- shift(X$vertices, vec=v, ...) Y$lines <- shift(X$lines, vec=v, ...) 
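  ## Note: 'v' is the displacement that was actually applied to the window,
  ## as retrieved by getlastshift(); reusing it for the vertices and segments
  ## ensures every component of the network is shifted by exactly the same
  ## vector (in particular when 'vec' was derived from 'origin' above).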
# tack on shift vector attr(Y, "lastshift") <- v return(Y) } rotate.linnet <- function(X, angle=pi/2, ..., centre=NULL) { verifyclass(X, "linnet") if(!is.null(centre)) { X <- shift(X, origin=centre) negorigin <- getlastshift(X) } else negorigin <- NULL Y <- X Y$vertices <- rotate(X$vertices, angle=angle, ...) Y$lines <- rotate(X$lines, angle=angle, ...) Y$window <- rotate(X$window, angle=angle, ...) if(!is.null(negorigin)) Y <- shift(Y, -negorigin) return(Y) } rescale.linnet <- function(X, s, unitname) { if(missing(unitname)) unitname <- NULL if(missing(s) || is.null(s)) s <- 1/unitname(X)$multiplier Y <- scalardilate(X, f=1/s) unitname(Y) <- rescale(unitname(X), s, unitname) return(Y) } "[.linnet" <- function(x, i, ..., snip=TRUE) { if(!is.owin(i)) stop("In [.linnet: the index i should be a window", call.=FALSE) x <- repairNetwork(x) w <- i wp <- as.polygonal(w) if(is.mask(w)) { ## protect against pixellation artefacts pixel <- owin(w$xstep * c(-1,1)/2, w$ystep * c(-1,1)/2) wp <- MinkowskiSum(wp, pixel) wp <- intersect.owin(wp, Frame(w)) } ## Find vertices that lie inside window vertinside <- inside.owin(x$vertices, w=wp) from <- x$from to <- x$to if(snip) { ## For efficiency, first restrict network to relevant segments. ## Find segments EITHER OF whose endpoints lie in 'w' ... okedge <- vertinside[from] | vertinside[to] ## ... or which cross the boundary of 'w' xlines <- as.psp(x) wlines <- edges(wp) if(any(miss <- !okedge)) { hits <- apply(test.crossing.psp(xlines[miss], wlines), 1, any) okedge[miss] <- hits } ## extract relevant subset of network graph x <- thinNetwork(x, retainedges=okedge) ## Now add vertices at crossing points with boundary of 'w' b <- unique(crossing.psp(xlines, wlines)) novel <- (nncross(b, x$vertices, what="dist") > 0) x <- insertVertices(x, b[novel]) boundarypoints <- attr(x, "id") ## update data from <- x$from to <- x$to vertinside <- inside.owin(x$vertices, w=wp) vertinside[boundarypoints] <- TRUE } ## find segments whose endpoints BOTH lie in 'w' edgeinside <- vertinside[from] & vertinside[to] ## .. and which are not trivial umap <- uniquemap(x$vertices) nontrivial <- (umap[from] != umap[to]) ## extract relevant subset of network xnew <- thinNetwork(x, retainedges=edgeinside & nontrivial) ## adjust window efficiently Window(xnew, check=FALSE) <- w return(xnew) } pixellate.linnet <- function(x, ...) { pixellate(as.psp(x), ...) } connected.linnet <- function(X, ..., what=c("labels", "components")) { verifyclass(X, "linnet") what <- match.arg(what) nv <- npoints(vertices(X)) lab0 <- cocoEngine(nv, X$from - 1L, X$to - 1L, "connected.linnet") lab <- lab0 + 1L lab <- factor(as.integer(factor(lab))) if(what == "labels") return(lab) nets <- list() subsets <- split(seq_len(nv), lab) for(i in seq_along(subsets)) nets[[i]] <- thinNetwork(X, retainvertices=subsets[[i]]) return(nets) } is.connected.linnet <- function(X, ...) 
{ if(!is.null(dpath <- X$dpath)) return(all(is.finite(dpath))) lab <- connected(X, what="labels") npieces <- length(levels(lab)) return(npieces == 1) } crossing.linnet <- function(X, Y) { X <- as.linnet(X) if(!inherits(Y, c("linnet", "infline", "psp"))) stop("L should be an object of class psp, linnet or infline", call.=FALSE) ## convert infinite lines to segments if(inherits(Y, "linnet")) Y <- as.psp(Y) if(inherits(Y, "infline")) { Y <- clip.infline(Y, Frame(X)) id <- marks(Y) lev <- levels(id) } else { id <- lev <- seq_len(nsegments(Y)) } ## extract segments of network S <- as.psp(X) ## find crossing points SY <- crossing.psp(S, Y, fatal=FALSE, details=TRUE) if(is.null(SY) || npoints(SY) == 0) return(lpp(L=X)) SY <- as.data.frame(SY) Z <- with(as.data.frame(SY), as.lpp(x=x, y=y, seg=iA, tp=tA, L=X, marks=factor(id[as.integer(jB)], levels=lev))) return(Z) } density.linnet <- function(x, ...) { density.psp(as.psp(x), ...) } spatstat.linnet/MD50000644000176200001440000002720714155176075013740 0ustar liggesusers3ad6dacc6871277a639efde3182688ce *DESCRIPTION c39fb504d515db71f4cf011457e84d74 *NAMESPACE 8cf17804c298dae19a4a7ebdd980bdbd *NEWS 4e2027728562a7df69a9558edfe5e958 *R/First.R 996cc7d569033ee3b57e55ae3424c136 *R/Math.linim.R b2a5315260a817781aef01990325846d *R/Math.linimlist.R 5e69edf4803ca42b4cb8701c7a03bf9c *R/auclpp.R 745d2c94acfaef072958aa8891e50ff4 *R/bermanlpp.R 501b633681b9c3f97653ae6f52e34a1b *R/bw.lppl.R ccd5c9ba108ef62afc9bdc26a394d804 *R/cdftestlpp.R e317941f2ea501d3f8f31733bd17173b *R/clickjoin.R 46f71fabebc3f2cd3060bbc35dcab8d4 *R/clicklpp.R 5d2adc377a2ee300837cd37e4e344dfd *R/crossdistlpp.R 8e30273faaa99f9171814699ef9e51d4 *R/deldirnet.R 88f0d55249afe2f9ba14f9b75a6cfd9b *R/density.loo.R d5b93ba036518600b85852f9bc480e98 *R/density.lpp.R aee4f6bea13ca4c1d6ef890d8f046cb9 *R/densityfunlpp.R a466c687de5e192c49fa1fb4beb63b0e *R/densitylppVoronoi.R f8d4fb83fb4129516c7d964047cb054c *R/distfunlpp.R 9e8172e789391e6329e03e28139c65b3 *R/envelopelpp.R a778f87ebfab7174b5ed7884ceb15218 *R/evalcovarlppm.R fe829d4df6cdcd24b4719f2a418eb0c7 *R/heatapprox.R f35b11891e94b4f6b72d7ba5d5190ddb *R/lindirichlet.R 510dd2efb4f5c1c556a4af8a0a888685 *R/linearK.R ee8ad61f7ececcb8b114ca8039d49520 *R/linearKmulti.R 58b7587a21c756bd05290fa7f64de7a4 *R/lineardisc.R f2f04bfaec86cf00552471bb235c43db *R/linearmrkcon.R 045390cc3874a70ba2d34b23d79e9252 *R/linearpcf.R 799aa539c47b94518c01d996d4c44ff7 *R/linearpcfmulti.R 78edf8d9c2de8b98bfa5b4187b1ff48d *R/linequad.R ed00a650d2a7568c94bc61fe1919ce46 *R/linfun.R ac93a6463ef8e70254cdf44e0b74b598 *R/linim.R 47d2213f470d4e802223152bc48c8573 *R/linnet.R b25f43d971c26f4aacc5107c20b97409 *R/linnetsurgery.R d800917852761c7aceb8d6d04f5a9099 *R/lintess.R 61578189acfc370af73fd3011605c032 *R/lintessmakers.R 5eb73d763b2ca3192f00d4f17f702113 *R/lixellate.R 8247ff03c8e3b41fcaf9ef06f5eb3ea8 *R/lpp.R 3b0a8283bec0938950f87039872b2831 *R/lppm.R a6548c8d0ad0a2c8afa5085c8c0ebeee *R/nndistlpp.R c81eacba48ddf6dfc385ac6ddf2880d6 *R/nnfromvertex.R 7c991b3e96c24ed6e67e8e55d7d7e7df *R/nnfunlpp.R f1e1f35fdbf405243326f757f6e144ae *R/pairdistlpp.R fd78e1d8e3af355a2107b873a1289357 *R/perspex.R edd44b97b90778a213be91e179956572 *R/quickndirty.R 430884d1ee9395980779923f71d78005 *R/randomlpp.R 7bceda3656d8009808c3506528f2f04d *R/rcelllpp.R 5349ce4b07899f37e930c5c38d25950f *R/relrisk.lpp.R d3c05280d2a7db0ceaa01312f82b8187 *R/rhohatlpp.R 0f65cdccd7beae81f6103f752e29f652 *R/sdr.R 2a90c951536a4919bd46c0294fa4664e *R/simulatelppm.R 5028969a6d80e24270a27f4ffc2585be *R/subsetlpp.R 
b3c3d90d017e1fb9830bb9e20b76b07e *R/treebranches.R c322eb0238ba8347d3ac1238ce736ed1 *R/unstacklpp.R 6f2f4dd6b46a2edd246daafc2d147f4e *inst/CITATION bd3a112db7f310f4460dba025e234188 *inst/doc/packagesizes.txt 12e68895fef0d3aa0bde45a0ddbadfa4 *inst/ratfor/Makefile 22e8a5189942ba190c13475b35459c7f *inst/ratfor/dppll.r 6d471ec061ea91398ba16323de56b9db *inst/ratfor/inxypOld.r de0b5e2b05e4341b1f5de7261804a993 *man/Extract.linim.Rd dc6c0e3c01c25332b25485da48c1f918 *man/Extract.linnet.Rd 5538d78ad2f67efa3bbf9fea43483a5f *man/Extract.lpp.Rd 96a1f95bfb80cbfc3e915264c3330284 *man/Math.linim.Rd b747613a11bc6f45761958859dec5b48 *man/Replace.linim.Rd 22c7ba2d147503b82b008d909082595d *man/Window.lpp.Rd 8fc6c1a1a6109731a5a3c97d115d8d9a *man/addVertices.Rd ef8a00b237a0279520f8c333a533b44d *man/affine.linnet.Rd 66d775558a9ef05c271ab95c5f356a34 *man/affine.lpp.Rd 5fcd23067d4ca2c676f57bf3dc7c71d5 *man/anova.lppm.Rd 8adcb78436fb363368b0e07ca01cd986 *man/as.data.frame.lintess.Rd eba1a4da889bc772d9b0bd97a4564131 *man/as.linfun.Rd c834a96765f78f8b07a8b4d0c88a4d4d *man/as.linim.Rd ffbac6474298e0323e33c45b918f4437 *man/as.linnet.linim.Rd 58035a8895f76749aeb5a648ca62af94 *man/as.linnet.psp.Rd c0d6a037cdd0273c906d4cf76704b275 *man/as.lpp.Rd c786f4316c880c69b0851910a04c1192 *man/as.owin.lpp.Rd 3afbc97c70774d9eaba5e0271eb15c25 *man/auc.lpp.Rd 34bfe7bb20f5983fe4266325b268d30b *man/begins.Rd 1ad73c7893b9ce6d633a432dd4186ab1 *man/berman.test.lpp.Rd eed7fe041f626623f67b718b48f0c387 *man/branchlabelfun.Rd 80eec41072cfd3bbc163fe934baade1f *man/bw.lppl.Rd 9eaa638121d12857608208f7f9a597fc *man/bw.relrisklpp.Rd 1f67e56f6705a466e2b8690c1c9e4ef1 *man/bw.voronoi.Rd 43cee887601680a3f86f5fd6c4f6e257 *man/cdf.test.lpp.Rd b88629357239cce654b8c37c6826d84e *man/chop.linnet.Rd ca00f4d880a5cd81ce7d9a4b125bf2e0 *man/clickjoin.Rd 6dd835e65a81b6072b8d24272c47b600 *man/clicklpp.Rd 66c243bbd2743d13c1b5d5745b1f1d08 *man/connected.linnet.Rd 88ca69371eddcee1a59fc9d67a70a664 *man/connected.lpp.Rd c6cfe87748efc0f3a9c28ed8a3d2c830 *man/crossdist.lpp.Rd 58cac3a3319f60ca98f5572ad296bd41 *man/crossing.linnet.Rd a6cd74dea247cd759420023c3a9fd0ea *man/cut.lpp.Rd e1b7687066cc53ac3eb1a34be3f11073 *man/data.lppm.Rd 652c6ff5511e6a5ad1fd5c338590fef8 *man/delaunayNetwork.Rd c9aabaae8e19078decca8cb19c6b7ab5 *man/deletebranch.Rd 126919bffb368abd0b2f0c0ac937ba8a *man/density.linnet.Rd c2874355250b920465c413b4c819e7ae *man/density.lpp.Rd e331e6e39c66226a49839dc175a7073d *man/densityEqualSplit.Rd 1046cdcd965989580353a55fb82502fd *man/densityHeat.lpp.Rd 15d9010d789c15829b7e94a51877ac13 *man/densityQuick.lpp.Rd c71ab76dcec73111e75f4c32b109fb2b *man/densityVoronoi.lpp.Rd 68b2aed096d3bd0711229d111c9ed490 *man/densityfun.lpp.Rd 041bedc39fc1f905ec3c6964cbed8119 *man/diameter.linnet.Rd 92e019b56ff321bf83144aaf951a7d0a *man/distfun.lpp.Rd 35027fc5ae66fdf42ad3a35a6f6541ac *man/divide.linnet.Rd ab68c7adce7c1b9175c0221e2e35ce90 *man/domain.lpp.Rd 44298536cd06a45bd6016723c92328b5 *man/envelope.lpp.Rd d3112d72c72642d2909ddf2d278a2079 *man/eval.linim.Rd c0ea753a8c1dd4801157715779193173 *man/fitted.lppm.Rd f9c267dcbd153afc80cd79b48987d85e *man/heatkernelapprox.Rd 4f5197718334e9304c8ef143a6d7226b *man/identify.lpp.Rd ff5f99f553da23bfaf7994e3d9188f0d *man/insertVertices.Rd 2b223c64155d9d404755aef5b749efd9 *man/integral.linim.Rd e696007219b960dc30c74bf2cfdcd222 *man/intensity.lpp.Rd ca44c73cdbfc32eb5da72e358d82da2b *man/intersect.lintess.Rd ac39e11c2fcdf9100f3b8b128aab2e60 *man/is.connected.linnet.Rd 34a64af6e7f58829f3f3fd1bc3394c5c *man/is.marked.lppm.Rd 
a53d3158dda304e043c18892b9f9f094 *man/is.multitype.lpp.Rd 4ae7b8573f8cca6807f48e1fbcd67bc3 *man/is.multitype.lppm.Rd 5e81ceae6abb1f5b6c5ad45be1035151 *man/is.stationary.lppm.Rd e39e3467b59e996132b6ba3640302853 *man/joinVertices.Rd 5670b9dced04d800e69b6fe9a7b0166a *man/linearK.Rd ce43cb8660392964c2e766771b68fc9d *man/linearKcross.Rd 50804af1c37e0a2d37a7279602205a56 *man/linearKcross.inhom.Rd b34168736debb9feec579758eec3cdce *man/linearKdot.Rd 4a5a16cf10123e9df8447e7a4e9f5ced *man/linearKdot.inhom.Rd ec0561370f5cd03b012d18df42178259 *man/linearKinhom.Rd 61a9c0ee9a32fd5cb5cbf1f3c7cbe7a4 *man/lineardirichlet.Rd 878f90d72b1600799724e250c275a4eb *man/lineardisc.Rd 7ca8a8045a16062d3d3306e299ecc3c7 *man/linearmarkconnect.Rd 16ace3968caabdf3f139d164c724c7f9 *man/linearmarkequal.Rd 68490464e097e0b5444c9d6661239168 *man/linearpcf.Rd aeb70b3559ea3b541d02ee631babf963 *man/linearpcfcross.Rd 5d2aea8d327780ec2bc11bc4a17846bc *man/linearpcfcross.inhom.Rd 2c6a6641002bcd15a20b745e03a790c1 *man/linearpcfdot.Rd 308c6720f054d0b81e54874aeb43d014 *man/linearpcfdot.inhom.Rd 00213d6b96fdc117fcd312c2e252d0b3 *man/linearpcfinhom.Rd 018024056cf83c3027af86554319b9a8 *man/lineartileindex.Rd 0485f3313d6e643b6aecb49494c1726e *man/linequad.Rd 5b43edf08f04436cd9feae10cde06d7a *man/linfun.Rd 06adc9370b13c10de5c898592d0b4b26 *man/linim.Rd 4de15bf3495e0f096f80aac8a801c015 *man/linnet.Rd 97a44e84682a7cbf5f95f8f309ef5295 *man/lintess.Rd cc314c90ddb3ebc15144776d5d532b6e *man/lixellate.Rd 5e041885fdbf52c04d05b05709f75699 *man/lpp.Rd bf49bae5aaa3f6fed410486f152c7317 *man/lppm.Rd 390c43841b92ba3277f25852a55d2cc9 *man/macros/defns.Rd 1657245392b857202e66b18457ff823f *man/marks.lintess.Rd 29c7e9c341f6f886f03a1879daf361b7 *man/mean.linim.Rd c7092d0e6e5ec9cb37a4570dec353e2a *man/methods.linfun.Rd 10ab8ad9556b2f70246edf511bf869aa *man/methods.linim.Rd 190565fc704aaa3d4ff79018ae800a43 *man/methods.linnet.Rd 4ed37cbfbce39c9304d1dae049a5e776 *man/methods.lpp.Rd ebbf24f03eac5355d2daf8d4c60a6cdb *man/methods.lppm.Rd 29704aa0d739fa6d425fd73cc0352db6 *man/model.frame.lppm.Rd ba7cd2ba7970e4d6747a64c0f13aaa51 *man/model.images.lppm.Rd 0b64dbe4a98318e4a5ceea19799cd780 *man/model.matrix.lppm.Rd af1283be6eb9ac2adee7c074df7f2db8 *man/nncross.lpp.Rd b445f88bd84b3169493df3629fb87cb5 *man/nndist.lpp.Rd 6f96676af04f6ce9098bbbce11655196 *man/nnfromvertex.Rd e6121e183d7e1174506cfe6fd392289a *man/nnfun.lpp.Rd 85383deb36661f8e585646c68b914b59 *man/nnwhich.lpp.Rd 4047fbec39b7b0550e5beb45bc09c849 *man/pairdist.lpp.Rd 8b8744286691b257a63805405a626ed0 *man/pairs.linim.Rd 67d25d3d825677567736a8a961471c34 *man/persp.linfun.Rd a896542ce0828690fdd03f4a1979a569 *man/persp.linim.Rd b8ba4d99f9f9d158dad279a64c5f5dec *man/plot.linim.Rd c2a1e4dc6ad004174de60d028e7ee574 *man/plot.linnet.Rd 82dd0004ed38aeec5086d358b49d22d7 *man/plot.lintess.Rd aa59caa5ecc3fc10efa0b2ec3a5cfae1 *man/plot.lpp.Rd 8af4ffb510808a99e8df8abed117eedf *man/plot.lppm.Rd d8fc082a4e08900675049aa011262b07 *man/points.lpp.Rd 0a4a7f74691f676391a543f30d8c4a20 *man/predict.lppm.Rd 48c4fbbc65052ac4d422221f65c8b386 *man/pseudoR2.lppm.Rd c556143ebe93c4f2cd2c28f0515f9916 *man/rSwitzerlpp.Rd 2dd9734f2a1dae007f2b6038f2add499 *man/rcelllpp.Rd 284f0cfd95e0644a2efdca55c6319477 *man/relrisk.lpp.Rd b1b0c94ffacfa5a7eb705e05d0ee02f4 *man/repairNetwork.Rd d19112ba2c1355566209f8b535b8a097 *man/rhohat.lpp.Rd 138b02e4ce873f2e9aab6d9e58a57612 *man/rjitter.lpp.Rd 4c0dc89855eeaef976d52181c2ec7184 *man/rlpp.Rd 2a27171241b97d49cde44f7bd2c60f39 *man/roc.lpp.Rd 431cc7fdc28659d5404cbacc19720b52 *man/rpoislpp.Rd 
d5d02f9cd0793e69a1c46b8eadeca5a9 *man/runiflpp.Rd 33e8e34807a9457dec6fec188dedacf8 *man/sdr.lpp.Rd a77f193e9fc39cc11d662323d2194f43 *man/simulate.lppm.Rd a59f11b4908a004ed1fc505046a87ae0 *man/spatstat.linnet-deprecated.Rd 55db3b6bf010a3db463cbbcc12c1bccd *man/spatstat.linnet-internal.Rd d91a0e873247afab345c6eeeb93e1966 *man/spatstat.linnet-package.Rd b4cdfa355b6cfef19ac7088b1dc1436d *man/subset.lpp.Rd 9555c94642212f7cfbb22fe921eab696 *man/superimpose.lpp.Rd cf69a69207cc820da0d63970d6982de0 *man/text.lpp.Rd 42178ef5976cd4236fd523b4e87008a9 *man/thinNetwork.Rd b7e64c4e39d547e9bb2a2ad12ff3972a *man/tile.lengths.Rd 5cedb80d13c4da07658fd4ad4747080c *man/tilenames.lintess.Rd ac1d70b547af8d8efc12a3d4c28ee0ed *man/treebranchlabels.Rd a76fcd8c05b143c22a30edb3248e45a9 *man/treeprune.Rd 08b5a95602cbd0b54b2edd6a5aab0376 *man/unstack.lpp.Rd 542e6e474340e0aac3ed97f6a92777b4 *src/chunkloop.h 506ce48a40836f1ea425720b877dd1c6 *src/depthrel.c cf17972d9d9e7e56e490f627985c29b1 *src/heatapprox.c 30c8b027a364ad82a74cb13af9643279 *src/init.c 9deaa2fae0ea8dfdd0eae591de2395b5 *src/linScrossdist.c ffb582f82a4711133646fed8bb9fdb4a *src/linSnncross.c a5c1cf5b36b43dd6e485c244a8211e7d *src/linSnncross.h 03440da50a1197aa0a5f2645ef257915 *src/linSpairdist.c 1d1d667e5408040e485391e667f92c0b *src/linSpairdist.h da9323b044795fa30043cc7fa4cff039 *src/lincrossdist.c 08c783db42f88ddf4aeb22d785eebd7b *src/lineardisc.c 4c2ed032c9ade8f7b0def614b467e21e *src/linearradius.c db2178ff853619414cc697d9036df0fe *src/linequad.c 12f26fa5b753013dfa6c1fe83fb869ec *src/linequad.h f7d4ee820e1941b157ea7671c6c4d424 *src/linknnd.c 6a1fc45c0e2514ff3fbc64d70c08f32b *src/linknnd.h 68ffec5ced261473748373d870bb38cb *src/linnncross.c f5e9929e504c04aa15bca3d400e8161b *src/linnncross.h 41d4b67757c18834fdec28c49685464e *src/linnndist.c 553cab9435f615564c8627e9ef8c2604 *src/linpairdist.c f470c12dfd1ea5e5fb3131459716e77e *src/lintileindex.c 1414378013832e4d35a7609f780eb437 *src/linvdist.c 5cb3a7386ab9a7899aaac191ee381d5f *src/linvdist.h e677d804970b9268c88a6de8abdd252b *src/linvknndist.c 2d8992531002b91a3c0861b1e7f7df9b *src/lixel.c 2ce60be2a7ea7c1c95eecd83ebe7f50d *src/proto.h 5c127a9d5ddeaee8cc8f34b32218a3a5 *src/yesno.h ab5679cd1feb4ffcf364ca4fc776a5b4 *tests/testsAtoK.R 26ab4d7c3b4c5ccc24580815c374403a *tests/testsL.R 3413e7c8882fbfb1e04917fb6b131154 *tests/testsMtoZ.R spatstat.linnet/inst/0000755000176200001440000000000014141460471014364 5ustar liggesusersspatstat.linnet/inst/doc/0000755000176200001440000000000014141460471015131 5ustar liggesusersspatstat.linnet/inst/doc/packagesizes.txt0000755000176200001440000000167014155073245020356 0ustar liggesusersdate version nhelpfiles nobjects ndatasets Rlines srclines "2020-12-13" "1.3-0" 133 282 0 10490 3080 "2021-01-07" "1.65-0" 133 282 0 10491 3081 "2021-01-07" "1.65-0" 133 282 0 10491 3081 "2021-01-10" "1.65-1" 133 282 0 10491 3081 "2021-01-24" "1.65-2" 133 282 0 10491 3081 "2021-01-30" "1.65-3" 133 282 0 10491 3081 "2021-02-02" "1.65-4" 133 282 0 10492 3081 "2021-03-03" "1.65-8" 133 282 0 10492 3081 "2021-03-13" "1.65-9" 133 282 0 10492 3081 "2021-03-13" "1.65-9" 133 282 0 10492 3081 "2021-03-13" "1.65-9" 133 282 0 10492 3081 "2021-03-13" "1.65-9" 133 282 0 10492 3081 "2021-03-18" "2.0-0" 133 282 0 10509 3081 "2021-03-18" "2.0-0" 133 282 0 10509 3081 "2021-03-26" "2.1-0" 134 283 0 10595 3081 "2021-03-28" "2.1-1" 134 283 0 10595 3081 "2021-06-21" "2.2-0" 134 283 0 10599 3081 "2021-06-21" "2.2-0" 134 283 0 10599 3081 "2021-06-22" "2.2-1" 134 283 0 10599 3081 "2021-07-16" "2.3-0" 137 287 0 10920 3232 
"2021-12-11" "2.3-1" 138 289 0 10947 3232 spatstat.linnet/inst/ratfor/0000755000176200001440000000000014141460471015661 5ustar liggesusersspatstat.linnet/inst/ratfor/inxypOld.r0000755000176200001440000000216314141460471017657 0ustar liggesuserssubroutine inxyp(x,y,xp,yp,npts,nedges,score,onbndry) implicit double precision(a-h,o-z) dimension x(npts), y(npts), xp(nedges), yp(nedges), score(npts) logical first, onbndry(npts) zero = 0.0d0 half = 0.5d0 one = 1.0d0 do i = 1,nedges { x0 = xp(i) y0 = yp(i) if(i == nedges) { x1 = xp(1) y1 = yp(1) } else { x1 = xp(i+1) y1 = yp(i+1) } dx = x1 - x0 dy = y1 - y0 do j = 1,npts { xcrit = (x(j) - x0)*(x(j) - x1) if(xcrit <= zero) { if(xcrit == zero) { contrib = half } else { contrib = one } ycrit = y(j)*dx - x(j)*dy + x0*dy - y0*dx if(dx < 0) { if(ycrit >= zero) { score(j) = score(j) + contrib } onbndry(j) = onbndry(j) | (ycrit == zero) } else if(dx > zero) { if(ycrit < zero) { score(j) = score(j) - contrib } onbndry(j) = onbndry(j) | (ycrit == zero) } else { if(x(j) == x0) { ycrit = (y(j) - y0)*(y(j) - y1) } onbndry(j) = onbndry(j) | (ycrit <= zero) } } } } return end spatstat.linnet/inst/ratfor/dppll.r0000755000176200001440000000203314141460471017160 0ustar liggesuserssubroutine dppll(x,y,l1,l2,l3,l4,np,nl,eps,mint,rslt,xmin,jmin) implicit double precision(a-h,o-z) dimension x(np), y(np), rslt(np,nl), xmin(np), jmin(np) double precision l1(nl), l2(nl), l3(nl), l4(nl) one = 1.d0 zero = 0.d0 do j = 1,nl { dx = l3(j) - l1(j) dy = l4(j) - l2(j) alen = sqrt(dx**2 + dy**2) if(alen .gt. eps) { co = dx/alen si = dy/alen } else { co = 0.5 si = 0.5 } do i = 1, np { xpx1 = x(i) - l1(j) ypy1 = y(i) - l2(j) xpx2 = x(i) - l3(j) ypy2 = y(i) - l4(j) d1 = xpx1**2 + ypy1**2 d2 = xpx2**2 + ypy2**2 dd = min(d1,d2) if(alen .gt. eps) { xpr = xpx1*co + ypy1*si if(xpr .lt. zero .or. xpr .gt. alen) { d3 = -one } else { ypr = - xpx1*si + ypy1*co d3 = ypr**2 } } else { d3 = -one } if(d3 .ge. zero) { dd = min(dd,d3) } sd =sqrt(dd) rslt(i,j) = sd if(mint.gt.0) { if(sd .lt. xmin(i)) { xmin(i) = sd if(mint.gt.1) { jmin(i) = j } } } } } return end spatstat.linnet/inst/ratfor/Makefile0000755000176200001440000000244414155073245017334 0ustar liggesusers RATFOR = /home/adrian/bin/ratfor77 #RATFOR = /usr/local/bin/ratfor CPP = /usr/bin/cpp ########################################################## # Sources actually written by humans: RAT_SRC = dppll.r inxypOld.r C_DOMINIC = dinfty.c dwpure.c C_MISC = raster.h areadiff.c closepair.c connect.c corrections.c \ discarea.c distances.c distmapbin.c distseg.c \ exactdist.c exactPdist.c \ massdisthack.c poly2im.c trigraf.c utils.c xyseg.c C_MH = methas.h dist2.h areaint.c badgey.c dgs.c \ diggra.c dist2.c fexitc.c getcif.c geyer.c \ lookup.c methas.c stfcr.c \ straush.c straushm.c strauss.c straussm.c C_KEST = Kloop.h Kborder.c C_SRC = $(C_DOMINIC) $(C_MISC) $(C_MH) $(C_KEST) CC_SRC = PerfectStrauss.cc HUMAN = $(RAT_SRC) $(C_SRC) $(CC_SRC) Makefile ########################################################## # Source to be generated automatically: RAT_FOR = dppll.f inxypOld.f GENERATED = $(RAT_FOR) ###################################################### ########### TARGETS ################################ target: $(GENERATED) @echo -- Done ------- tar: tar cvf src.tar $(HUMAN) clean: rm $(GENERATED) -rm src.tar ####################################################### ######### RULES ################################## .r.f: $(RATFOR) -o $@ $? 
spatstat.linnet/inst/CITATION0000755000176200001440000000501314141460471015523 0ustar liggesuserscitHeader("To cite spatstat in publications use:") citEntry(entry = "Book", title = "Spatial Point Patterns: Methodology and Applications with {R}", author = personList(as.person("Adrian Baddeley"), as.person("Ege Rubak"), as.person("Rolf Turner")), year = "2015", publisher = "Chapman and Hall/CRC Press", address = "London", url="https://www.routledge.com/Spatial-Point-Patterns-Methodology-and-Applications-with-R/Baddeley-Rubak-Turner/9781482210200/", textVersion = paste("Adrian Baddeley, Ege Rubak, Rolf Turner (2015).", "Spatial Point Patterns: Methodology and Applications with R.", "London: Chapman and Hall/CRC Press, 2015.", "URL https://www.routledge.com/Spatial-Point-Patterns-Methodology-and-Applications-with-R/Baddeley-Rubak-Turner/9781482210200/") ) citEntry(entry = "Article", title = "Hybrids of Gibbs Point Process Models and Their Implementation", author = personList(as.person("Adrian Baddeley"), as.person("Rolf Turner"), as.person("Jorge Mateu"), as.person("Andrew Bevan")), journal = "Journal of Statistical Software", year = "2013", volume = "55", number = "11", pages = "1--43", url = "https://www.jstatsoft.org/v55/i11/", textVersion = paste("Adrian Baddeley, Rolf Turner, Jorge Mateu, Andrew Bevan (2013).", "Hybrids of Gibbs Point Process Models and Their Implementation.", "Journal of Statistical Software, 55(11), 1-43.", "URL https://www.jstatsoft.org/v55/i11/."), header = "If you use hybrid models, please also cite:" ) citEntry(entry = "Article", title = "{spatstat}: An {R} Package for Analyzing Spatial Point Patterns", author = personList(as.person("Adrian Baddeley"), as.person("Rolf Turner")), journal = "Journal of Statistical Software", year = "2005", volume = "12", number = "6", pages = "1--42", url = "https://www.jstatsoft.org/v12/i06/", textVersion = paste("Adrian Baddeley, Rolf Turner (2005).", "spatstat: An R Package for Analyzing Spatial Point Patterns.", "Journal of Statistical Software 12(6), 1-42.", "URL https://www.jstatsoft.org/v12/i06/."), header = "In survey articles, please cite the original paper on spatstat:" )